added OpenShifts fork of Kubernetes

reason for this is that openshift/kubernetes backported
k8s.io/kubernetes/pkg/securitycontextconstraints/util
that is currently required by something that
github.com/openshift/origin/pkg/deploy/api/v1 depends on
This commit is contained in:
Tomas Kral 2016-07-20 16:23:09 +02:00
parent 2626b51dee
commit 3db5069ff5
224 changed files with 95644 additions and 85623 deletions

329
Godeps/Godeps.json generated
View File

@ -37,6 +37,11 @@
"ImportPath": "github.com/davecgh/go-spew/spew",
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
},
{
"ImportPath": "github.com/dgrijalva/jwt-go",
"Comment": "v2.2.0-23-g5ca8014",
"Rev": "5ca80149b9d3f8b863af0e2bb6742e608603bd99"
},
{
"ImportPath": "github.com/docker/distribution",
"Comment": "v2.5.0-rc.1-22-g4e17ab5",
@ -501,6 +506,21 @@
"ImportPath": "github.com/docker/libtrust",
"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
},
{
"ImportPath": "github.com/emicklei/go-restful",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/emicklei/go-restful/log",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/emicklei/go-restful/swagger",
"Comment": "v1.2-54-g7c47e25",
"Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a"
},
{
"ImportPath": "github.com/fatih/structs",
"Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e"
@ -631,16 +651,81 @@
"Comment": "v0.0.7",
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
},
{
"ImportPath": "github.com/openshift/origin/pkg/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/api/extension",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/authorization/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/build/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/deploy/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/deploy/api/v1",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/image/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/oauth/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/project/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/route/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/sdn/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/security/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/template/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/user/api",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/openshift/origin/pkg/util/namer",
"Comment": "v1.3.0-alpha.2-1281-g2e48c47",
"Rev": "2e48c47ce0371eab4d23ce32c0fec6de2e964dc1"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
@ -693,6 +778,14 @@
"ImportPath": "golang.org/x/net/context",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/http2",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/proxy",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
@ -712,173 +805,273 @@
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/endpoints",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/errors",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/meta",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/pod",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/resource",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/service",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/unversioned",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/util",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/v1",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/api/validation",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/auth/user",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/capabilities",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/conversion",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/fields",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/labels",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/securitycontextconstraints/util",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/serviceaccount",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/types",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/errors",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/framer",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/hash",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/intstr",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/json",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/net",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/net/sets",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/parsers",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/rand",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/runtime",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/sets",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/validation",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/validation/field",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/wait",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/util/yaml",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/watch",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/pkg/watch/versioned",
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
},
{
"ImportPath": "k8s.io/kubernetes/third_party/forked/reflect",
"Comment": "v1.3.0",
"Rev": "283137936a498aed572ee22af6774b6fb6e9fd94"
"Comment": "v1.3.0-58-g57fb9ac",
"Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6"
}
]
}

4
vendor/github.com/dgrijalva/jwt-go/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
.DS_Store
bin

8
vendor/github.com/dgrijalva/jwt-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,8 @@
Copyright (c) 2012 Dave Grijalva
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

61
vendor/github.com/dgrijalva/jwt-go/README.md generated vendored Normal file
View File

@ -0,0 +1,61 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html)
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
## What the heck is a JWT?
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own.
## Parse and Verify
Parsing and verifying tokens is pretty straight forward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used.
```go
token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
return myLookupKey(token.Header["kid"])
})
if err == nil && token.Valid {
deliverGoodness("!")
} else {
deliverUtterRejection(":(")
}
```
## Create a token
```go
// Create the token
token := jwt.New(jwt.SigningMethodHS256)
// Set some claims
token.Claims["foo"] = "bar"
token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix()
// Sign and get the complete encoded token as a string
tokenString, err := token.SignedString(mySigningKey)
```
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b).

54
vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md generated vendored Normal file
View File

@ -0,0 +1,54 @@
## `jwt-go` Version History
#### 2.2.0
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
#### 2.1.0
Backwards compatible API change that was missed in 2.0.0.
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
#### 2.0.0
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
* **Compatibility Breaking Changes**
* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
* `KeyFunc` now returns `interface{}` instead of `[]byte`
* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodHS256`
* Added public package global `SigningMethodHS384`
* Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodRS256`
* Added public package global `SigningMethodRS384`
* Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
#### 1.0.2
* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes
#### 1.0.1
* Fixed panic if RS256 signing method was passed an invalid key
#### 1.0.0
* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods

4
vendor/github.com/dgrijalva/jwt-go/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt

43
vendor/github.com/dgrijalva/jwt-go/errors.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package jwt
import (
"errors"
)
// Error constants
var (
ErrInvalidKey = errors.New("key is invalid or of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
ErrNoTokenInRequest = errors.New("no token present in request")
)
// The errors that might occur when parsing and validating a token
const (
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
ValidationErrorUnverifiable // Token could not be verified because of signing problems
ValidationErrorSignatureInvalid // Signature validation failed
ValidationErrorExpired // Exp validation failed
ValidationErrorNotValidYet // NBF validation failed
)
// ValidationError is the error returned by Parse when a token fails
// validation. Errors is a bitfield of the ValidationError... constants
// describing everything that went wrong.
type ValidationError struct {
	err    string
	Errors uint32 // bitfield. see ValidationError... constants
}

// Error implements the error interface, falling back to a generic
// message when no specific one was recorded.
func (e ValidationError) Error() string {
	if e.err != "" {
		return e.err
	}
	return "token is invalid"
}

// valid reports whether no validation error bits were recorded.
func (e *ValidationError) valid() bool {
	return e.Errors == 0
}

84
vendor/github.com/dgrijalva/jwt-go/hmac.go generated vendored Normal file
View File

@ -0,0 +1,84 @@
package jwt
import (
"crypto"
"crypto/hmac"
"errors"
)
// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
type SigningMethodHMAC struct {
Name string // "alg" header value, e.g. "HS256"
Hash crypto.Hash // underlying hash, e.g. crypto.SHA256
}
// Specific instances for HS256 and company, populated and registered in
// init below.
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
// ErrSignatureInvalid is returned by Verify when the computed HMAC
// does not match the token's signature.
ErrSignatureInvalid = errors.New("signature is invalid")
)
// init registers each HS* instance with the package signing-method
// registry under its "alg" name so it can be looked up during Parse.
func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})
// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})
// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}
// Alg returns the "alg" header value for this signing method (e.g. "HS256").
func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
// Verify implements signature verification for the SigningMethod.
// The key must be a []byte HMAC secret; any other type is rejected
// with ErrInvalidKey.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
	keyBytes, ok := key.([]byte)
	if !ok {
		return ErrInvalidKey
	}
	sig, err := DecodeSegment(signature)
	if err != nil {
		return err
	}
	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	// Constant-time comparison of the expected and presented MACs.
	mac := hmac.New(m.Hash.New, keyBytes)
	mac.Write([]byte(signingString))
	if !hmac.Equal(sig, mac.Sum(nil)) {
		return ErrSignatureInvalid
	}
	return nil
}
// Sign implements the SigningMethod Sign method for HMAC.
// The key must be a []byte secret; any other type yields ErrInvalidKey.
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
	keyBytes, ok := key.([]byte)
	if !ok {
		return "", ErrInvalidKey
	}
	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}
	mac := hmac.New(m.Hash.New, keyBytes)
	mac.Write([]byte(signingString))
	return EncodeSegment(mac.Sum(nil)), nil
}

198
vendor/github.com/dgrijalva/jwt-go/jwt.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
package jwt
import (
"encoding/base64"
"encoding/json"
"net/http"
"strings"
"time"
)
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now
// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)
// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
Raw string // The raw token. Populated when you Parse a token
Method SigningMethod // The signing method used or to be used
Header map[string]interface{} // The first segment of the token
Claims map[string]interface{} // The second segment of the token
Signature string // The third segment of the token. Populated when you Parse a token
Valid bool // Is the token valid? Populated when you Parse/Verify a token
}
// New allocates an unsigned Token for the given signing method,
// pre-populating the standard "typ" and "alg" header fields.
func New(method SigningMethod) *Token {
	header := map[string]interface{}{
		"typ": "JWT",
		"alg": method.Alg(),
	}
	return &Token{
		Method: method,
		Header: header,
		Claims: make(map[string]interface{}),
	}
}
// SignedString returns the complete, signed token: the signing string
// ("header.claims") joined with its signature by a '.'.
func (t *Token) SignedString(key interface{}) (string, error) {
	sstr, err := t.SigningString()
	if err != nil {
		return "", err
	}
	sig, err := t.Method.Sign(sstr, key)
	if err != nil {
		return "", err
	}
	return sstr + "." + sig, nil
}
// SigningString generates the "header.claims" portion of the token:
// each part is JSON-encoded and then base64url-encoded. This is the
// string that gets signed; unless you need it for something special,
// just go straight for SignedString.
//
// Returns an error only if JSON marshaling of the header or claims fails.
func (t *Token) SigningString() (string, error) {
	parts := make([]string, 2)
	// parts[0] is the header, parts[1] is the claims.
	for i, source := range []map[string]interface{}{t.Header, t.Claims} {
		jsonValue, err := json.Marshal(source)
		if err != nil {
			return "", err
		}
		parts[i] = EncodeSegment(jsonValue)
	}
	return strings.Join(parts, "."), nil
}
// Parse parses, validates, and returns a token.
// keyFunc receives the parsed-but-unverified token and must return the
// key for validating its signature. On success err is nil and
// token.Valid is true; on failure the returned *ValidationError carries
// a bitfield describing everything that went wrong.
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
// A compact JWT is exactly three dot-separated segments:
// header.claims.signature.
parts := strings.Split(tokenString, ".")
if len(parts) != 3 {
return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed}
}
var err error
token := &Token{Raw: tokenString}
// parse Header
var headerBytes []byte
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
// parse Claims
var claimBytes []byte
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(claimBytes, &token.Claims); err != nil {
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed}
}
// Look up the signing method named by the (caller-untrusted) "alg"
// header. NOTE(review): nothing here forces callers to check that alg
// is the one they expect — keyFunc should verify it.
if method, ok := token.Header["alg"].(string); ok {
if token.Method = GetSigningMethod(method); token.Method == nil {
return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable}
}
} else {
return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable}
}
// Lookup key
var key interface{}
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable}
}
if key, err = keyFunc(token); err != nil {
// keyFunc returned an error
return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable}
}
// Check expiration times. encoding/json decodes JSON numbers into
// float64, hence the float64 assertions on exp/nbf.
vErr := &ValidationError{}
now := TimeFunc().Unix()
if exp, ok := token.Claims["exp"].(float64); ok {
if now > int64(exp) {
vErr.err = "token is expired"
vErr.Errors |= ValidationErrorExpired
}
}
if nbf, ok := token.Claims["nbf"].(float64); ok {
if now < int64(nbf) {
vErr.err = "token is not valid yet"
vErr.Errors |= ValidationErrorNotValidYet
}
}
// Verify the signature over "header.claims" even if exp/nbf already
// failed, so vErr accumulates every applicable error bit.
if err = token.Method.Verify(strings.Join(parts[0:2], "."), parts[2], key); err != nil {
vErr.err = err.Error()
vErr.Errors |= ValidationErrorSignatureInvalid
}
if vErr.valid() {
token.Valid = true
return token, nil
}
return token, vErr
}
// ParseFromRequest tries to find a token in an http.Request: first in
// the Authorization header (as a bearer token), then in an
// "access_token" form parameter. It calls ParseMultipartForm when no
// token is found in the header.
func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) {
// Look for an Authorization header
if ah := req.Header.Get("Authorization"); ah != "" {
// Should be a bearer token.
// NOTE(review): ah[7:] assumes exactly one separator character after
// "BEARER" and never checks that ah[6] is actually a space — a value
// like "BEARERxyz..." silently drops the character at index 6.
if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" {
return Parse(ah[7:], keyFunc)
}
}
// Look for "access_token" parameter; form parsing is capped at 10e6
// bytes and its error is deliberately ignored — best-effort lookup.
req.ParseMultipartForm(10e6)
if tokStr := req.Form.Get("access_token"); tokStr != "" {
return Parse(tokStr, keyFunc)
}
return nil, ErrNoTokenInRequest
}
// EncodeSegment performs JWT-style base64url encoding: standard URL
// alphabet with the trailing '=' padding stripped.
func EncodeSegment(seg []byte) string {
	encoded := base64.URLEncoding.EncodeToString(seg)
	return strings.TrimRight(encoded, "=")
}
// DecodeSegment decodes a JWT segment: base64url whose trailing padding may
// have been stripped. Padding is restored before handing off to the decoder.
func DecodeSegment(seg string) ([]byte, error) {
	if rem := len(seg) % 4; rem != 0 {
		seg += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(seg)
}

114
vendor/github.com/dgrijalva/jwt-go/rsa.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// SigningMethodRSA implements the RSA family of signing methods
// (RSASSA-PKCS1-v1_5, per the Verify/Sign bodies below).
type SigningMethodRSA struct {
	Name string      // "alg" header value, e.g. "RS256"
	Hash crypto.Hash // hash paired with the algorithm
}

// Specific instances for RS256 and company; populated in init below.
var (
	SigningMethodRS256 *SigningMethodRSA
	SigningMethodRS384 *SigningMethodRSA
	SigningMethodRS512 *SigningMethodRSA
)
// init creates the RS256/RS384/RS512 instances and registers each with the
// package-level signing-method registry under its "alg" name.
func init() {
	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}

	for _, method := range []*SigningMethodRSA{
		SigningMethodRS256,
		SigningMethodRS384,
		SigningMethodRS512,
	} {
		method := method // capture a per-iteration value for the closure
		RegisterSigningMethod(method.Alg(), func() SigningMethod {
			return method
		})
	}
}
// Alg returns the "alg" header value for this signing method, e.g. "RS256".
func (m *SigningMethodRSA) Alg() string {
	return m.Name
}
// Verify implements the Verify method from SigningMethod.
// The key must be either a PEM encoded PKCS1 or PKCS8 RSA public key as
// []byte, or an *rsa.PublicKey.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
	// The signature arrives base64url-encoded; decode it first.
	sig, err := DecodeSegment(signature)
	if err != nil {
		return err
	}

	// Normalize the key argument into an *rsa.PublicKey.
	var pub *rsa.PublicKey
	switch k := key.(type) {
	case []byte:
		if pub, err = ParseRSAPublicKeyFromPEM(k); err != nil {
			return err
		}
	case *rsa.PublicKey:
		pub = k
	default:
		return ErrInvalidKey
	}

	if !m.Hash.Available() {
		return ErrHashUnavailable
	}
	digest := m.Hash.New()
	digest.Write([]byte(signingString))

	return rsa.VerifyPKCS1v15(pub, m.Hash, digest.Sum(nil), sig)
}
// Sign implements the Sign method from SigningMethod.
// The key must be either a PEM encoded PKCS1 or PKCS8 RSA private key as
// []byte, or an *rsa.PrivateKey.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
	// Normalize the key argument into an *rsa.PrivateKey.
	var priv *rsa.PrivateKey
	switch k := key.(type) {
	case []byte:
		parsed, err := ParseRSAPrivateKeyFromPEM(k)
		if err != nil {
			return "", err
		}
		priv = parsed
	case *rsa.PrivateKey:
		priv = k
	default:
		return "", ErrInvalidKey
	}

	if !m.Hash.Available() {
		return "", ErrHashUnavailable
	}
	digest := m.Hash.New()
	digest.Write([]byte(signingString))

	// Sign the digest and return it base64url-encoded without padding.
	sigBytes, err := rsa.SignPKCS1v15(rand.Reader, priv, m.Hash, digest.Sum(nil))
	if err != nil {
		return "", err
	}
	return EncodeSegment(sigBytes), nil
}

68
vendor/github.com/dgrijalva/jwt-go/rsa_utils.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
package jwt
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
	// ErrKeyMustBePEMEncoded is returned when pem.Decode finds no PEM block.
	ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
	// ErrNotRSAPrivateKey is returned when the parsed key is not an RSA key.
	// NOTE(review): ParseRSAPublicKeyFromPEM also returns this sentinel for a
	// non-RSA *public* key; a distinct ErrNotRSAPublicKey would be clearer.
	ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
)
// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
// and returns it as an *rsa.PrivateKey.
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(key)
	if block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}

	// Try PKCS1 first, then fall back to PKCS8.
	var parsedKey interface{}
	var err error
	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
			return nil, err
		}
	}

	priv, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		return nil, ErrNotRSAPrivateKey
	}
	return priv, nil
}
// ParseRSAPublicKeyFromPEM parses a PEM encoded PKIX public key, or an X.509
// certificate from which the public key is extracted.
// (The original comment said "PKCS1 or PKCS8", but the code parses PKIX/certs.)
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
	var err error
	// Parse PEM block
	var block *pem.Block
	if block, _ = pem.Decode(key); block == nil {
		return nil, ErrKeyMustBePEMEncoded
	}
	// Parse as a PKIX public key; fall back to extracting the public key
	// from a certificate. (The inner err deliberately shadows the outer one.)
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
			parsedKey = cert.PublicKey
		} else {
			return nil, err
		}
	}
	var pkey *rsa.PublicKey
	var ok bool
	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
		// NOTE(review): this returns the *private*-key sentinel for a non-RSA
		// public key; upstream later introduced ErrNotRSAPublicKey for this
		// case. Left unchanged here because callers may compare against it.
		return nil, ErrNotRSAPrivateKey
	}
	return pkey, nil
}

24
vendor/github.com/dgrijalva/jwt-go/signing_method.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package jwt
// signingMethods maps "alg" header values to factories for their
// implementations. Note: the map is not mutex-protected; registration is
// expected to happen during package init only.
var signingMethods = map[string]func() SigningMethod{}

// SigningMethod is implemented by every JWT signing/verification algorithm.
type SigningMethod interface {
	Verify(signingString, signature string, key interface{}) error
	Sign(signingString string, key interface{}) (string, error)
	Alg() string
}

// RegisterSigningMethod associates an "alg" name with a factory for its
// signing method. This is typically done during init() in the method's
// implementation.
func RegisterSigningMethod(alg string, f func() SigningMethod) {
	signingMethods[alg] = f
}

// GetSigningMethod looks up a signing method by its "alg" name; it returns
// nil when the name is unknown.
func GetSigningMethod(alg string) (method SigningMethod) {
	if factory, found := signingMethods[alg]; found {
		method = factory()
	}
	return
}

70
vendor/github.com/emicklei/go-restful/.gitignore generated vendored Normal file
View File

@ -0,0 +1,70 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
restful.html
*.out
tmp.prof
go-restful.test
examples/restful-basic-authentication
examples/restful-encoding-filter
examples/restful-filters
examples/restful-hello-world
examples/restful-resource-functions
examples/restful-serve-static
examples/restful-user-service
*.DS_Store
examples/restful-user-resource
examples/restful-multi-containers
examples/restful-form-handling
examples/restful-CORS-filter
examples/restful-options-filter
examples/restful-curly-router
examples/restful-cpuprofiler-service
examples/restful-pre-post-filters
curly.prof
examples/restful-NCSA-logging
examples/restful-html-template
s.html
restful-path-tail

163
vendor/github.com/emicklei/go-restful/CHANGES.md generated vendored Normal file
View File

@ -0,0 +1,163 @@
Change history of go-restful
=
2016-02-14
- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
- add constructors for custom entity accessors for xml and json
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
- add configurable logging
2015-03-18
- if not specified, the Operation is derived from the Route function
2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route
2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation
2014-04-16
- (api add) expose constructor of Request for testing.
2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- (api add) Added support for request/response filter functions
2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits
2012-11-14
- Initial commit

22
vendor/github.com/emicklei/go-restful/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

74
vendor/github.com/emicklei/go-restful/README.md generated vendored Normal file
View File

@ -0,0 +1,74 @@
go-restful
==========
package for building REST-style Web Services using Google Go
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI
### Example
```Go
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser).
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Writes(User{}))
...
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
```
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
### Features
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
- Response API for writing structs to JSON/XML and setting headers
- Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package)
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration
### Resources
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
(c) 2012 - 2015, http://ernestmicklei.com. MIT License
Type ```git shortlog -s``` for a full list of contributors.

1
vendor/github.com/emicklei/go-restful/Srcfile generated vendored Normal file
View File

@ -0,0 +1 @@
{"SkipDirs": ["examples"]}

10
vendor/github.com/emicklei/go-restful/bench_test.sh generated vendored Normal file
View File

@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof

123
vendor/github.com/emicklei/go-restful/compress.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
"net"
"net/http"
"strings"
)
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
	writer     http.ResponseWriter // wrapped writer that receives the compressed bytes
	compressor io.WriteCloser      // gzip or zlib writer; set to nil by Close
	encoding   string              // ENCODING_GZIP or ENCODING_DEFLATE
}
// Header is part of http.ResponseWriter interface; it delegates to the wrapped writer.
func (c *CompressingResponseWriter) Header() http.Header {
	return c.writer.Header()
}

// WriteHeader is part of http.ResponseWriter interface; it delegates to the wrapped writer.
func (c *CompressingResponseWriter) WriteHeader(status int) {
	c.writer.WriteHeader(status)
}

// Write is part of http.ResponseWriter interface.
// The bytes are passed through the compressor; writing after Close fails.
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
	if c.isCompressorClosed() {
		return -1, errors.New("Compressing error: tried to write data using closed compressor")
	}
	return c.compressor.Write(bytes)
}

// CloseNotify is part of http.CloseNotifier interface.
// NOTE(review): this panics if the wrapped writer does not implement
// http.CloseNotifier — confirm all callers wrap a compliant writer.
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
	return c.writer.(http.CloseNotifier).CloseNotify()
}
// Close closes the underlying compressor (flushing remaining compressed
// data) and releases it back to the provider's cache. It returns an error
// when called on an already-closed writer, or when the flush fails.
func (c *CompressingResponseWriter) Close() error {
	if c.isCompressorClosed() {
		return errors.New("Compressing error: tried to close already closed compressor")
	}
	// Closing flushes any buffered compressed data; the original silently
	// discarded this error, which hid truncated-response failures.
	err := c.compressor.Close()
	if ENCODING_GZIP == c.encoding {
		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
	}
	if ENCODING_DEFLATE == c.encoding {
		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
	}
	// Mark as closed so further Write/Close calls fail fast.
	c.compressor = nil
	return err
}
// isCompressorClosed reports whether Close has already released the compressor.
func (c *CompressingResponseWriter) isCompressorClosed() bool {
	return nil == c.compressor
}

// Hijack implements the Hijacker interface.
// This is especially useful when combining Container.EnabledContentEncoding
// with websockets (for instance gorilla/websocket).
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	hijacker, ok := c.writer.(http.Hijacker)
	if !ok {
		return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
	}
	return hijacker.Hijack()
}
// wantsCompressedResponse reads the Accept-Encoding header to see if and
// which encoding is requested. When both gzip and deflate appear, the one
// listed first wins.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
	acceptEncoding := httpRequest.Header.Get(HEADER_AcceptEncoding)
	gzipAt := strings.Index(acceptEncoding, ENCODING_GZIP)
	zlibAt := strings.Index(acceptEncoding, ENCODING_DEFLATE)
	switch {
	case gzipAt == -1 && zlibAt == -1:
		// neither requested (DEFLATE is returned as a don't-care default)
		return false, ENCODING_DEFLATE
	case gzipAt == -1:
		return true, ENCODING_DEFLATE
	case zlibAt == -1:
		return true, ENCODING_GZIP
	case gzipAt < zlibAt:
		return true, ENCODING_GZIP
	default:
		return true, ENCODING_DEFLATE
	}
}
// NewCompressingResponseWriter creates a CompressingResponseWriter for a
// known encoding, which must be ENCODING_GZIP or ENCODING_DEFLATE; any other
// value yields an error. The Content-Encoding header is set unconditionally,
// matching the original behavior.
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
	c := &CompressingResponseWriter{writer: httpWriter, encoding: encoding}
	switch encoding {
	case ENCODING_GZIP:
		w := currentCompressorProvider.AcquireGzipWriter()
		w.Reset(httpWriter)
		c.compressor = w
	case ENCODING_DEFLATE:
		w := currentCompressorProvider.AcquireZlibWriter()
		w.Reset(httpWriter)
		c.compressor = w
	default:
		return nil, errors.New("Unknown encoding:" + encoding)
	}
	// The original declared an err variable that could never be non-nil here
	// and returned it; return nil explicitly instead.
	return c, nil
}

View File

@ -0,0 +1,103 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
	gzipWriters     chan *gzip.Writer // buffered channel acting as the writer cache
	gzipReaders     chan *gzip.Reader // buffered channel acting as the reader cache
	zlibWriters     chan *zlib.Writer
	writersCapacity int // cache capacity for both writer channels
	readersCapacity int // cache capacity for the reader channel
}

// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
	b := &BoundedCachedCompressors{
		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
		writersCapacity: writersCapacity,
		readersCapacity: readersCapacity,
	}
	// Pre-fill every cache up to its capacity so early acquires never allocate.
	for ix := 0; ix < writersCapacity; ix++ {
		b.gzipWriters <- newGzipWriter()
		b.zlibWriters <- newZlibWriter()
	}
	for ix := 0; ix < readersCapacity; ix++ {
		b.gzipReaders <- newGzipReader()
	}
	return b
}
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
	select {
	case w := <-b.gzipWriters:
		return w
	default:
		// cache exhausted; hand out a fresh, unmanaged writer
		return newGzipWriter()
	}
}

// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
	if len(b.gzipWriters) >= b.writersCapacity {
		return // cache is full; drop the unmanaged writer
	}
	b.gzipWriters <- w
}

// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
	select {
	case r := <-b.gzipReaders:
		return r
	default:
		// cache exhausted; hand out a fresh, unmanaged reader
		return newGzipReader()
	}
}

// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
	if len(b.gzipReaders) >= b.readersCapacity {
		return // cache is full; drop the unmanaged reader
	}
	b.gzipReaders <- r
}

// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
	select {
	case w := <-b.zlibWriters:
		return w
	default:
		// cache exhausted; hand out a fresh, unmanaged writer
		return newZlibWriter()
	}
}

// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
	if len(b.zlibWriters) >= b.writersCapacity {
		return // cache is full; drop the unmanaged writer
	}
	b.zlibWriters <- w
}

View File

@ -0,0 +1,91 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/gzip"
"compress/zlib"
"sync"
)
// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool.
// (The exported name carries an upstream typo — "Compessors" — which cannot
// be fixed without breaking callers.)
type SyncPoolCompessors struct {
	GzipWriterPool *sync.Pool
	GzipReaderPool *sync.Pool
	ZlibWriterPool *sync.Pool
}

// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
// Pools allocate lazily via their New functions on first Get.
func NewSyncPoolCompessors() *SyncPoolCompessors {
	return &SyncPoolCompessors{
		GzipWriterPool: &sync.Pool{
			New: func() interface{} { return newGzipWriter() },
		},
		GzipReaderPool: &sync.Pool{
			New: func() interface{} { return newGzipReader() },
		},
		ZlibWriterPool: &sync.Pool{
			New: func() interface{} { return newZlibWriter() },
		},
	}
}

// AcquireGzipWriter takes a writer from the pool (allocating when empty).
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
	return s.GzipWriterPool.Get().(*gzip.Writer)
}

// ReleaseGzipWriter returns a writer to the pool.
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
	s.GzipWriterPool.Put(w)
}

// AcquireGzipReader takes a reader from the pool (allocating when empty).
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
	return s.GzipReaderPool.Get().(*gzip.Reader)
}

// ReleaseGzipReader returns a reader to the pool.
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
	s.GzipReaderPool.Put(r)
}

// AcquireZlibWriter takes a writer from the pool (allocating when empty).
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
	return s.ZlibWriterPool.Get().(*zlib.Writer)
}

// ReleaseZlibWriter returns a writer to the pool.
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
	s.ZlibWriterPool.Put(w)
}
func newGzipWriter() *gzip.Writer {
// create with an empty bytes writer; it will be replaced before using the gzipWriter
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}
// newGzipReader constructs a *gzip.Reader primed with a minimal valid GZIP
// stream; callers must Reset it onto real input before use.
func newGzipReader() *gzip.Reader {
	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
	// we can safely use currentCompressorProvider because it is set on package initialization.
	w := currentCompressorProvider.AcquireGzipWriter()
	defer currentCompressorProvider.ReleaseGzipWriter(w)
	b := new(bytes.Buffer)
	w.Reset(b)
	// Flush then Close emits a complete (empty) GZIP stream into b.
	w.Flush()
	w.Close()
	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		// Only reachable on malformed input, which the stream above is not.
		panic(err.Error())
	}
	return reader
}
func newZlibWriter() *zlib.Writer {
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}

53
vendor/github.com/emicklei/go-restful/compressors.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// CompressorProvider supplies and recycles gzip/zlib compressors and
// decompressors. Implementations in this package: SyncPoolCompessors and
// BoundedCachedCompressors.
type CompressorProvider interface {
	// Returns a *gzip.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireGzipWriter() *gzip.Writer

	// Releases an acquired *gzip.Writer.
	ReleaseGzipWriter(w *gzip.Writer)

	// Returns a *gzip.Reader which needs to be released later.
	AcquireGzipReader() *gzip.Reader

	// Releases an acquired *gzip.Reader.
	ReleaseGzipReader(w *gzip.Reader)

	// Returns a *zlib.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireZlibWriter() *zlib.Writer

	// Releases an acquired *zlib.Writer.
	ReleaseZlibWriter(w *zlib.Writer)
}
// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
// (The original comment called it "DefaultCompressorProvider"; the variable
// itself is unexported.)
var currentCompressorProvider CompressorProvider

func init() {
	// Default to sync.Pool-backed compressors; replace via SetCompressorProvider.
	currentCompressorProvider = NewSyncPoolCompessors()
}

// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
	return currentCompressorProvider
}

// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
// It panics when given nil.
func SetCompressorProvider(p CompressorProvider) {
	if p == nil {
		panic("cannot set compressor provider to nil")
	}
	currentCompressorProvider = p
}

30
vendor/github.com/emicklei/go-restful/constants.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
	// MIME types used with Consumes() and/or Produces().
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	// Common HTTP header names.
	HEADER_Allow           = "Allow"
	HEADER_Accept          = "Accept"
	HEADER_Origin          = "Origin"
	HEADER_ContentType     = "Content-Type"
	HEADER_LastModified    = "Last-Modified"
	HEADER_AcceptEncoding  = "Accept-Encoding"
	HEADER_ContentEncoding = "Content-Encoding"

	// CORS-related header names.
	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"

	// Content encodings understood by the compressing response writer.
	ENCODING_GZIP    = "gzip"
	ENCODING_DEFLATE = "deflate"
)

361
vendor/github.com/emicklei/go-restful/container.go generated vendored Normal file
View File

@ -0,0 +1,361 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"sync"
"github.com/emicklei/go-restful/log"
)
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
// The requests are further dispatched to routes of WebServices using a RouteSelector
type Container struct {
	webServicesLock        sync.RWMutex // guards webServices and isRegisteredOnRoot (see Add)
	webServices            []*WebService
	ServeMux               *http.ServeMux
	isRegisteredOnRoot     bool // true once a handler is mounted on "/"
	containerFilters       []FilterFunction
	doNotRecover           bool // default is false
	recoverHandleFunc      RecoverHandleFunction
	serviceErrorHandleFunc ServiceErrorHandleFunction
	router                 RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
	contentEncodingEnabled bool          // default is false
}
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311).
// Panic recovery is on (doNotRecover=false) and content encoding is off by default.
func NewContainer() *Container {
	return &Container{
		webServices:            []*WebService{},
		ServeMux:               http.NewServeMux(),
		isRegisteredOnRoot:     false,
		containerFilters:       []FilterFunction{},
		doNotRecover:           false,
		recoverHandleFunc:      logStackOnRecover,
		serviceErrorHandleFunc: writeServiceError,
		router:                 RouterJSR311{},
		contentEncodingEnabled: false}
}
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
// The first argument is what recover() returns. The second must be used to communicate an error response.
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
// RecoverHandler changes the default function (logStackOnRecover) to be called
// when a panic is detected. DoNotRecover must be have its default value (=false).
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
c.recoverHandleFunc = handler
}
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
// The first argument is the service error, the second is the request that resulted in the error and
// the third must be used to communicate an error response.
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
// ServiceErrorHandler changes the default function (writeServiceError) to be called
// when a ServiceError is detected during route selection.
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
	c.serviceErrorHandleFunc = handler
}
// DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications.
func (c *Container) DoNotRecover(doNot bool) {
	c.doNotRecover = doNot
}
// Router changes the default Router (currently RouterJSR311).
// Use CurlyRouter{} for the faster, wildcard-capable alternative.
func (c *Container) Router(aRouter RouteSelector) {
	c.router = aRouter
}
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses,
// negotiated per-request from the Accept-Encoding header (see dispatch).
func (c *Container) EnableContentEncoding(enabled bool) {
	c.contentEncodingEnabled = enabled
}
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
// NOTE: a duplicate root path terminates the whole process via os.Exit(1); callers cannot recover.
func (c *Container) Add(service *WebService) *Container {
	c.webServicesLock.Lock()
	defer c.webServicesLock.Unlock()
	// if rootPath was not set then lazy initialize it
	if len(service.rootPath) == 0 {
		service.Path("/")
	}
	// cannot have duplicate root paths
	for _, each := range c.webServices {
		if each.RootPath() == service.RootPath() {
			log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
			os.Exit(1)
		}
	}
	// If not registered on root then add specific mapping
	if !c.isRegisteredOnRoot {
		c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
	}
	c.webServices = append(c.webServices, service)
	return c
}
// addHandler may set a new HandleFunc for the serveMux.
// This function must run inside the critical region protected by the webServicesLock.
// It returns true if the function was registered on root ("/").
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
	pattern := fixedPrefixPath(service.RootPath())
	// a root or empty fixed prefix means the dispatcher must own "/"
	if pattern == "/" || pattern == "" {
		serveMux.HandleFunc("/", c.dispatch)
		return true
	}
	// skip registration when an earlier WebService already claimed this root path
	registered := false
	for _, existing := range c.webServices {
		if existing.RootPath() == service.RootPath() {
			registered = true
			break
		}
	}
	if registered {
		return false
	}
	serveMux.HandleFunc(pattern, c.dispatch)
	// also map the trailing-slash variant so sub-paths are dispatched here
	if !strings.HasSuffix(pattern, "/") {
		serveMux.HandleFunc(pattern+"/", c.dispatch)
	}
	return false
}
// Remove drops a WebService from the Container and rebuilds the internal ServeMux,
// re-registering the remaining WebServices. Because handlers cannot be de-registered
// from the http.DefaultServeMux, removal is refused with an error in that case.
func (c *Container) Remove(ws *WebService) error {
	if c.ServeMux == http.DefaultServeMux {
		errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
		// Print, not Printf: errMsg is not a format string and may contain '%'
		// from the WebService dump (go vet printf).
		log.Print(errMsg)
		return errors.New(errMsg)
	}
	c.webServicesLock.Lock()
	defer c.webServicesLock.Unlock()
	// build a new ServeMux and re-register all WebServices
	newServeMux := http.NewServeMux()
	newServices := []*WebService{}
	newIsRegisteredOnRoot := false
	for _, each := range c.webServices {
		if each.rootPath != ws.rootPath {
			// If not registered on root then add specific mapping
			if !newIsRegisteredOnRoot {
				newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
			}
			newServices = append(newServices, each)
		}
	}
	c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
	return nil
}
// logStackOnRecover is the default RecoverHandleFunction and is called
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
// Default implementation logs the stacktrace and writes the stacktrace on the response.
// This may be a security issue as it exposes sourcecode information.
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
	// start at caller depth 2 to skip this function and the deferred recover closure
	for i := 2; ; i += 1 {
		_, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
	}
	log.Print(buffer.String())
	// respond with 500 and the collected stacktrace as body
	httpWriter.WriteHeader(http.StatusInternalServerError)
	httpWriter.Write(buffer.Bytes())
}
// writeServiceError is the default ServiceErrorHandleFunction and is called
// when a ServiceError is returned during route selection. Default implementation
// calls resp.WriteErrorString(err.Code, err.Message).
func writeServiceError(err ServiceError, req *Request, resp *Response) {
	resp.WriteErrorString(err.Code, err.Message)
}
// Dispatch the incoming Http Request to a matching WebService.
// Order of the deferred functions matters: the compressor close runs last,
// after panic recovery and body close have fired.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
	writer := httpWriter
	// CompressingResponseWriter should be closed after all operations are done
	defer func() {
		if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
			compressWriter.Close()
		}
	}()
	// Install panic recovery unless told otherwise
	if !c.doNotRecover { // catch all for 500 response
		defer func() {
			if r := recover(); r != nil {
				c.recoverHandleFunc(r, writer)
				return
			}
		}()
	}
	// Install closing the request body (if any)
	defer func() {
		if nil != httpRequest.Body {
			httpRequest.Body.Close()
		}
	}()
	// Detect if compression is needed
	// assume without compression, test for override
	if c.contentEncodingEnabled {
		doCompress, encoding := wantsCompressedResponse(httpRequest)
		if doCompress {
			var err error
			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
			if err != nil {
				log.Print("[restful] unable to install compressor: ", err)
				httpWriter.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
	}
	// Find best match Route ; err is non nil if no match was found.
	// Route selection runs under the read lock; the closure scopes the RUnlock
	// so filters and the route function below execute without holding it.
	var webService *WebService
	var route *Route
	var err error
	func() {
		c.webServicesLock.RLock()
		defer c.webServicesLock.RUnlock()
		webService, route, err = c.router.SelectRoute(
			c.webServices,
			httpRequest)
	}()
	if err != nil {
		// a non-200 response has already been written
		// run container filters anyway ; they should not touch the response...
		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
			switch err.(type) {
			case ServiceError:
				ser := err.(ServiceError)
				c.serviceErrorHandleFunc(ser, req, resp)
			}
			// TODO
		}}
		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
		return
	}
	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
	// pass through filters (if any)
	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
		// compose filter chain: container filters first, then WebService, then Route
		allFilters := []FilterFunction{}
		allFilters = append(allFilters, c.containerFilters...)
		allFilters = append(allFilters, webService.filters...)
		allFilters = append(allFilters, route.Filters...)
		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
			// handle request by route after passing all filters
			route.Function(wrappedRequest, wrappedResponse)
		}}
		chain.ProcessFilter(wrappedRequest, wrappedResponse)
	} else {
		// no filters, handle request by route
		route.Function(wrappedRequest, wrappedResponse)
	}
}
// fixedPrefixPath returns the static leading portion of pathspec, i.e. everything
// before the first "{" template variable. When the spec contains no template
// variables the whole string is returned unchanged.
func fixedPrefixPath(pathspec string) string {
	if idx := strings.Index(pathspec, "{"); idx != -1 {
		return pathspec[:idx]
	}
	return pathspec
}
// ServeHTTP implements net/http.Handler therefore a *Container can be a Handler in a http.Server.
// The receiver is a pointer: Container embeds a sync.RWMutex and a value receiver
// would copy that lock on every request (go vet copylocks).
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
// Pointer receiver avoids copying the Container's sync.RWMutex (go vet copylocks).
func (c *Container) Handle(pattern string, handler http.Handler) {
	c.ServeMux.Handle(pattern, handler)
}
// HandleWithFilter registers the handler for the given pattern.
// The Container's filter chain is applied before the handler runs.
// If a handler already exists for pattern, HandleWithFilter panics.
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
	c.Handle(pattern, http.HandlerFunc(func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
		// fast path: no container filters installed
		if len(c.containerFilters) == 0 {
			handler.ServeHTTP(httpResponse, httpRequest)
			return
		}
		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
			handler.ServeHTTP(httpResponse, httpRequest)
		}}
		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
	}))
}
// Filter appends a container FilterFunction. These are called before dispatching
// a http.Request to a WebService from the container.
func (c *Container) Filter(filter FilterFunction) {
	c.containerFilters = append(c.containerFilters, filter)
}
// RegisteredWebServices returns a copy of the collection of added WebServices.
// The receiver must be a pointer: with a value receiver the embedded
// sync.RWMutex is copied, so RLock would guard a per-call copy and provide
// no synchronization with writers such as Add/Remove (go vet copylocks).
func (c *Container) RegisteredWebServices() []*WebService {
	c.webServicesLock.RLock()
	defer c.webServicesLock.RUnlock()
	result := make([]*WebService, len(c.webServices))
	copy(result, c.webServices)
	return result
}
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request.
// It matches the request path against every registered WebService and, within a
// matching service, against every Route; a Route contributes its Method when the
// remainder of the path match is empty or "/".
// Pointer receiver: keeps the embedded sync.RWMutex shared (go vet copylocks).
func (c *Container) computeAllowedMethods(req *Request) []string {
	// Go through all RegisteredWebServices() and all its Routes to collect the options
	methods := []string{}
	requestPath := req.Request.URL.Path
	for _, ws := range c.RegisteredWebServices() {
		matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
		if matches != nil {
			// last submatch is the unmatched remainder of the path
			finalMatch := matches[len(matches)-1]
			for _, rt := range ws.Routes() {
				matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
				if matches != nil {
					lastMatch := matches[len(matches)-1]
					// only include when the route consumed the full path (remainder empty or "/")
					if lastMatch == "" || lastMatch == "/" {
						methods = append(methods, rt.Method)
					}
				}
			}
		}
	}
	// methods = append(methods, "OPTIONS") not sure about this
	return methods
}
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
// It is basic because no parameter or (produces) content-type information is given.
// Only the Accept header is propagated onto the Response for content negotiation.
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
	resp := NewResponse(httpWriter)
	resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	return NewRequest(httpRequest), resp
}

202
vendor/github.com/emicklei/go-restful/cors_filter.go generated vendored Normal file
View File

@ -0,0 +1,202 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"regexp"
"strconv"
"strings"
)
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
	ExposeHeaders  []string // list of Header names
	AllowedHeaders []string // list of Header names; empty means any requested header is accepted
	AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
	AllowedMethods []string // allowed HTTP methods; computed from the container when left empty
	MaxAge         int      // number of seconds before requiring new Options request
	CookiesAllowed bool     // when true the Access-Control-Allow-Credentials header is set
	Container      *Container
	allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
// Requests without an Origin header, or with a disallowed origin, are passed through unchanged.
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
	origin := req.Request.Header.Get(HEADER_Origin)
	if len(origin) == 0 {
		// not a CORS request; continue normally
		if trace {
			traceLogger.Print("no Http header Origin set")
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if !c.isOriginAllowed(origin) { // check whether this origin is allowed
		// disallowed origin: no CORS headers are added, processing continues
		if trace {
			traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
		}
		chain.ProcessFilter(req, resp)
		return
	}
	if req.Request.Method != "OPTIONS" {
		// simple/actual CORS request
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
	// OPTIONS with Access-Control-Request-Method is a preflight; a plain OPTIONS is treated as actual
	if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
		// preflight ends here; the chain is intentionally not continued
		c.doPreflightRequest(req, resp)
	} else {
		c.doActualRequest(req, resp)
		chain.ProcessFilter(req, resp)
		return
	}
}
// doActualRequest adds the CORS response headers for a simple (non-preflight) request.
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
	c.setOptionsHeaders(req, resp)
	// continue processing the response
}
// doPreflightRequest validates the Access-Control-Request-Method and
// Access-Control-Request-Headers of an OPTIONS preflight and, when valid,
// writes the Allow-Methods/Allow-Headers response headers.
// Invalid preflights return silently without CORS headers.
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
	// lazily compute AllowedMethods from the container's registered routes
	if len(c.AllowedMethods) == 0 {
		if c.Container == nil {
			c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
		} else {
			c.AllowedMethods = c.Container.computeAllowedMethods(req)
		}
	}
	acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
	if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
		if trace {
			traceLogger.Printf("Http header %s:%s is not in %v",
				HEADER_AccessControlRequestMethod,
				acrm,
				c.AllowedMethods)
		}
		return
	}
	// every requested header must be individually allowed
	acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
	if len(acrhs) > 0 {
		for _, each := range strings.Split(acrhs, ",") {
			if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
				if trace {
					traceLogger.Printf("Http header %s:%s is not in %v",
						HEADER_AccessControlRequestHeaders,
						acrhs,
						c.AllowedHeaders)
				}
				return
			}
		}
	}
	resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
	resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
	c.setOptionsHeaders(req, resp)
	// return http 200 response, no body
}
// setOptionsHeaders writes the common CORS response headers shared by
// actual and preflight requests: Expose-Headers, Allow-Origin,
// Allow-Credentials and (when configured) Max-Age.
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
	c.checkAndSetExposeHeaders(resp)
	c.setAllowOriginHeader(req, resp)
	c.checkAndSetAllowCredentials(resp)
	if c.MaxAge > 0 {
		resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
	}
}
// isOriginAllowed reports whether origin may make CORS requests.
// An empty AllowedDomains list allows every origin. Otherwise origin is first
// compared literally against AllowedDomains and then, on a miss, against the
// same entries compiled as regular expressions (supporting subdomain patterns).
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
	if len(origin) == 0 {
		return false
	}
	if len(c.AllowedDomains) == 0 {
		return true
	}
	allowed := false
	for _, domain := range c.AllowedDomains {
		if domain == origin {
			allowed = true
			break
		}
	}
	if !allowed {
		if len(c.allowedOriginPatterns) == 0 {
			// compile allowed domains to allowed origin patterns
			// NOTE(review): c is a value receiver, so this cache assignment is lost
			// when the method returns and the patterns are recompiled on every
			// call with a non-literal match — verify against upstream intent.
			allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
			if err != nil {
				return false
			}
			c.allowedOriginPatterns = allowedOriginRegexps
		}
		for _, pattern := range c.allowedOriginPatterns {
			if allowed = pattern.MatchString(origin); allowed {
				break
			}
		}
	}
	return allowed
}
// setAllowOriginHeader echoes the request's Origin in Access-Control-Allow-Origin
// when that origin is allowed; otherwise no header is written.
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
	origin := req.Request.Header.Get(HEADER_Origin)
	if c.isOriginAllowed(origin) {
		resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
	}
}
// checkAndSetExposeHeaders writes Access-Control-Expose-Headers when any are configured.
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
	if len(c.ExposeHeaders) > 0 {
		resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
	}
}
// checkAndSetAllowCredentials writes Access-Control-Allow-Credentials when cookies are permitted.
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
	if c.CookiesAllowed {
		resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
	}
}
// isValidAccessControlRequestMethod reports whether method is contained in allowedMethods.
// The comparison is exact (case-sensitive), matching HTTP method semantics.
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
	found := false
	for i := 0; i < len(allowedMethods) && !found; i++ {
		found = allowedMethods[i] == method
	}
	return found
}
// isValidAccessControlRequestHeader reports whether header is present in
// AllowedHeaders. HTTP header names are case-insensitive, so the comparison
// uses strings.EqualFold instead of lowering both operands (which allocates
// two throwaway strings per comparison).
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
	for _, each := range c.AllowedHeaders {
		if strings.EqualFold(each, header) {
			return true
		}
	}
	return false
}
// compileRegexps compiles each string in regexpStrings into a *regexp.Regexp.
// On the first compilation failure it returns the regexps compiled so far
// together with the error. The result slice is pre-sized to avoid repeated
// growth copies.
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
	regexps := make([]*regexp.Regexp, 0, len(regexpStrings))
	for _, regexpStr := range regexpStrings {
		r, err := regexp.Compile(regexpStr)
		if err != nil {
			return regexps, err
		}
		regexps = append(regexps, r)
	}
	return regexps, nil
}

2
vendor/github.com/emicklei/go-restful/coverage.sh generated vendored Normal file
View File

@ -0,0 +1,2 @@
# Run the package tests, recording coverage, then open the annotated HTML report.
go test -coverprofile=coverage.out
go tool cover -html=coverage.out

162
vendor/github.com/emicklei/go-restful/curly.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
"regexp"
"sort"
"strings"
)
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
// It is the lighter-weight alternative to RouterJSR311 (see doc.go, Performance options).
type CurlyRouter struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
// It first picks the highest-scoring WebService, then the best Route within it;
// each miss yields a 404 error (the service is still returned when only the
// route lookup fails, so callers can report context).
func (c CurlyRouter) SelectRoute(
	webServices []*WebService,
	httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
	requestTokens := tokenizePath(httpRequest.URL.Path)
	detectedService := c.detectWebService(requestTokens, webServices)
	if detectedService == nil {
		if trace {
			traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
		}
		return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	candidateRoutes := c.selectRoutes(detectedService, requestTokens)
	if len(candidateRoutes) == 0 {
		if trace {
			traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
		}
		return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	// err from detectRoute (e.g. 405/406/415) is propagated when no route survives
	selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
	if selectedRoute == nil {
		return detectedService, nil, err
	}
	return detectedService, selectedRoute, nil
}
// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
// Candidates are ordered best-first: the sort is reversed so higher static/param counts come first.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) []Route {
	candidates := &sortableCurlyRoutes{[]*curlyRoute{}}
	for _, each := range ws.routes {
		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
		if matches {
			candidates.add(&curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
		}
	}
	sort.Sort(sort.Reverse(candidates))
	return candidates.routes()
}
// matchesRouteByPathTokens computes whether the route matches, how many parameters
// match, and how many static path elements match.
// A request longer than the route only matches when the route's last token is a
// "{...*}" wildcard; a regex-with-remainder match ("{var:*}") stops further token
// comparison via break.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
	if len(routeTokens) < len(requestTokens) {
		// proceed in matching only if last routeToken is wildcard
		count := len(routeTokens)
		if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
			return false, 0, 0
		}
		// proceed
	}
	for i, routeToken := range routeTokens {
		if i == len(requestTokens) {
			// reached end of request path before the route pattern ended
			return false, 0, 0
		}
		requestToken := requestTokens[i]
		if strings.HasPrefix(routeToken, "{") {
			// parameter token, e.g. {id} or {id:regex}
			paramCount++
			if colon := strings.Index(routeToken, ":"); colon != -1 {
				// match by regex
				matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
				if !matchesToken {
					return false, 0, 0
				}
				if matchesRemainder {
					// "{var:*}" consumes all remaining request tokens
					break
				}
			}
		} else { // no { prefix
			if requestToken != routeToken {
				return false, 0, 0
			}
			staticCount++
		}
	}
	return true, paramCount, staticCount
}
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens.
// Format of routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}.
// The special expression "*" matches the remainder of the path.
// NOTE(review): regexp.MatchString is unanchored, so the expression matches any
// substring of requestToken unless the pattern itself uses ^...$ — confirm intent.
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
	regPart := routeToken[colon+1 : len(routeToken)-1]
	if regPart == "*" {
		if trace {
			traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
		}
		return true, true
	}
	// a compile error in the expression is reported as a non-match
	matched, err := regexp.MatchString(regPart, requestToken)
	return (matched && err == nil), false
}
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go; the JSR311 logic is reused here.
func (c CurlyRouter) detectRoute(candidateRoutes []Route, httpRequest *http.Request) (*Route, error) {
	// tracing is done inside detectRoute
	return RouterJSR311{}.detectRoute(candidateRoutes, httpRequest)
}
// detectWebService returns the best matching WebService given the list of path tokens,
// or nil when none matches. See also computeWebserviceScore.
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
	var best *WebService
	bestScore := -1
	for _, candidate := range webServices {
		if ok, s := c.computeWebserviceScore(requestTokens, candidate.pathExpr.tokens); ok && s > bestScore {
			best, bestScore = candidate, s
		}
	}
	return best
}
// computeWebserviceScore returns whether tokens match the request tokens and
// the weighted score of the longest matching consecutive tokens from the beginning.
// Static tokens near the front weigh more than parameter tokens ({...}).
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
	if len(tokens) > len(requestTokens) {
		return false, 0
	}
	score := 0
	for i, token := range tokens {
		reqToken := requestTokens[i]
		switch {
		case len(reqToken) == 0 && len(token) == 0:
			// both segments empty (e.g. leading "/") still count
			score++
		case len(token) > 0 && strings.HasPrefix(token, "{"):
			// a parameter token must not match an empty path segment
			if len(reqToken) == 0 {
				return false, score
			}
			score++
		default:
			// static token must match exactly
			if reqToken != token {
				return false, score
			}
			score += (len(tokens) - i) * 10 //fuzzy
		}
	}
	return true, score
}

54
vendor/github.com/emicklei/go-restful/curly_route.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
	route       Route
	paramCount  int // number of {param} tokens matched
	staticCount int // number of literal tokens matched
}
// sortableCurlyRoutes implements sort.Interface over candidate curlyRoutes.
type sortableCurlyRoutes struct {
	candidates []*curlyRoute
}
// add appends a candidate route.
func (s *sortableCurlyRoutes) add(route *curlyRoute) {
	s.candidates = append(s.candidates, route)
}
// routes unwraps the candidates into their plain Route values, preserving order.
func (s *sortableCurlyRoutes) routes() (routes []Route) {
	for i := range s.candidates {
		routes = append(routes, s.candidates[i].route) // TODO change return type
	}
	return routes
}
// Len is part of sort.Interface.
func (s *sortableCurlyRoutes) Len() int {
	return len(s.candidates)
}
// Swap is part of sort.Interface.
func (s *sortableCurlyRoutes) Swap(i, j int) {
	s.candidates[i], s.candidates[j] = s.candidates[j], s.candidates[i]
}
// Less is part of sort.Interface. Routes are ordered by static element count,
// then parameter count, then route path; callers sort in reverse to get the
// most specific route first.
func (s *sortableCurlyRoutes) Less(i, j int) bool {
	a, b := s.candidates[i], s.candidates[j]
	// primary key: number of static path elements
	if a.staticCount != b.staticCount {
		return a.staticCount < b.staticCount
	}
	// secondary key: number of path parameters
	if a.paramCount != b.paramCount {
		return a.paramCount < b.paramCount
	}
	// final tie-breaker: lexicographic route path
	return a.route.Path < b.route.Path
}

196
vendor/github.com/emicklei/go-restful/doc.go generated vendored Normal file
View File

@ -0,0 +1,196 @@
/*
Package restful, a lean package for creating REST-style WebServices without magic.
WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handle Http requests from a server.
A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
...
// GET http://localhost:8080/users/1
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
Use the following statement to pass the request,response pair to the next filter or RouteFunction
chain.ProcessFilter(req, resp)
Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
Route Filters
These are processed before calling the function associated with the Route.
// install 2 chained route filters (processed before calling findUser)
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
restful.DefaultContainer.EnableContentEncoding(true)
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
400: Bad Request
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
404: Not Found
Despite a valid URI, the resource requested may not be available
500: Internal Server Error
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
405: Method Not Allowed
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
406: Not Acceptable
The request does not have or has an unknown Accept Header set for this operation.
415: Unsupported Media Type
The request does not have or has an unknown Content-Type Header set for this operation.
ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
restful.DefaultContainer.Router(CurlyRouter{})
The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
The CurlyRouter implementation is more lightweight and also allows you to use wildcards and expressions, but only if needed.
restful.DefaultContainer.DoNotRecover(true)
DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to true, Route functions are responsible for handling any error situation.
Default value is false; it will recover from panics. This has performance implications.
restful.SetCacheReadEntity(false)
SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
Trouble shooting
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.
Resources
[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful

View File

@ -0,0 +1,163 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"encoding/json"
"encoding/xml"
"strings"
"sync"
)
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
// The Request may have a decompressing reader. Depends on Content-Encoding.
Read(req *Request, v interface{}) error
// Write a serialized version of the value on the response.
// The Response may have a compressing writer. Depends on Accept-Encoding.
// status should be a valid Http Status code
Write(resp *Response, status int, v interface{}) error
}
// entityAccessRegistry is a singleton holding all registered MIME accessors.
var entityAccessRegistry = &entityReaderWriters{
	protection: new(sync.RWMutex),
	accessors:  map[string]EntityReaderWriter{},
}

// entityReaderWriters associates MIME to an EntityReaderWriter.
// All access to the map is guarded by protection (see RegisterEntityAccessor / accessorAt).
type entityReaderWriters struct {
	protection *sync.RWMutex
	accessors  map[string]EntityReaderWriter
}
// init pre-registers the JSON and XML accessors so the package works out of the box.
func init() {
	RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
	RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}
// RegisterEntityAccessor adds (or overrides) the EntityReaderWriter used to
// encode and decode content with the given MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
	registry := entityAccessRegistry
	registry.protection.Lock()
	registry.accessors[mime] = erw
	registry.protection.Unlock()
}
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
	accessor := entityJSONAccess{ContentType: contentType}
	return accessor
}

// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
	accessor := entityXMLAccess{ContentType: contentType}
	return accessor
}
// accessorAt returns the registered ReaderWriter for this MIME type.
// If no exact match exists, it falls back to a substring match against
// all registered types.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
	r.protection.RLock()
	defer r.protection.RUnlock()
	if erw, found := r.accessors[mime]; found {
		return erw, true
	}
	// Reverse lookup by substring match; more expensive, but we are
	// already in an exceptional situation.
	for registered, erw := range r.accessors {
		if strings.Contains(mime, registered) {
			return erw, true
		}
	}
	return nil, false
}
// entityXMLAccess is a EntityReaderWriter for XML encoding
type entityXMLAccess struct {
	// This is used for setting the Content-Type header when writing
	ContentType string
}

// Read unmarshalls the value from XML
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
	return xml.NewDecoder(req.Request.Body).Decode(v)
}

// Write marshalls the value to XML and sets the Content-Type header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
	return writeXML(resp, status, e.ContentType, v)
}
// writeXML marshalls the value to XML and sets the Content-Type header.
// A nil value writes only the status code; no body is produced.
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
	if v == nil {
		resp.WriteHeader(status)
		// do not write a nil representation
		return nil
	}
	if resp.prettyPrint {
		// pretty output must be created and written explicitly
		output, err := xml.MarshalIndent(v, " ", " ")
		if err != nil {
			return err
		}
		// headers must be set before the first body write
		resp.Header().Set(HEADER_ContentType, contentType)
		resp.WriteHeader(status)
		// emit the standard <?xml ...?> preamble before the document
		_, err = resp.Write([]byte(xml.Header))
		if err != nil {
			return err
		}
		_, err = resp.Write(output)
		return err
	}
	// not-so-pretty: stream directly (note: no xml.Header is written here)
	resp.Header().Set(HEADER_ContentType, contentType)
	resp.WriteHeader(status)
	return xml.NewEncoder(resp).Encode(v)
}
// entityJSONAccess is a EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
	// This is used for setting the Content-Type header when writing
	ContentType string
}

// Read unmarshalls the value from JSON.
// UseNumber keeps numbers as json.Number instead of float64, avoiding precision loss.
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
	decoder := json.NewDecoder(req.Request.Body)
	decoder.UseNumber()
	return decoder.Decode(v)
}

// Write marshalls the value to JSON and sets the Content-Type header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
	return writeJSON(resp, status, e.ContentType, v)
}
// writeJSON marshalls the value to JSON and sets the Content-Type header.
// A nil value writes only the status code; no body is produced.
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
	if v == nil {
		resp.WriteHeader(status)
		// do not write a nil representation
		return nil
	}
	if resp.prettyPrint {
		// pretty output must be created and written explicitly
		output, err := json.MarshalIndent(v, " ", " ")
		if err != nil {
			return err
		}
		// headers must be set before the first body write
		resp.Header().Set(HEADER_ContentType, contentType)
		resp.WriteHeader(status)
		_, err = resp.Write(output)
		return err
	}
	// not-so-pretty: stream directly through the encoder
	resp.Header().Set(HEADER_ContentType, contentType)
	resp.WriteHeader(status)
	return json.NewEncoder(resp).Encode(v)
}

26
vendor/github.com/emicklei/go-restful/filter.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
	Filters []FilterFunction // ordered list of FilterFunction
	Index   int              // index into filters that is currently in progress
	Target  RouteFunction    // function to call after passing all filters
}
// ProcessFilter passes the request,response pair through the next of Filters.
// Each filter can decide to proceed to the next Filter or handle the Response itself.
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
	// All filters consumed: invoke the target route function.
	if f.Index >= len(f.Filters) {
		f.Target(request, response)
		return
	}
	// Advance the cursor before invoking, so that a filter calling
	// ProcessFilter continues with the next filter in the chain.
	next := f.Filters[f.Index]
	f.Index++
	next(request, response, f)
}
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)

9
vendor/github.com/emicklei/go-restful/install.sh generated vendored Normal file
View File

@ -0,0 +1,9 @@
# Compile every example to verify it still builds (binaries are discarded).
cd examples
ls *.go | xargs -I {} go build -o /tmp/ignore {}
cd ..
# Format, test and install the swagger sub-package, then the restful package.
# The leading "..." is a Go import-path wildcard matching paths ending in the suffix.
go fmt ...swagger && \
go test -test.v ...swagger && \
go install ...swagger && \
go fmt ...restful && \
go test -test.v ...restful && \
go install ...restful

247
vendor/github.com/emicklei/go-restful/jsr311.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"fmt"
"net/http"
"sort"
)
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
// RouterJSR311 implements the Router interface.
// Concept of locators is not implemented.
// The type is stateless, so a zero value is ready for use.
type RouterJSR311 struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
// Matching proceeds in three steps: pick the WebService (dispatcher),
// collect candidate Routes for the remaining path, then pick the Route.
func (r RouterJSR311) SelectRoute(
	webServices []*WebService,
	httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {

	// Identify the root resource class (WebService)
	dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
	if err != nil {
		return nil, nil, NewError(http.StatusNotFound, "")
	}
	// Obtain the set of candidate methods (Routes)
	routes := r.selectRoutes(dispatcher, finalMatch)
	if len(routes) == 0 {
		return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
	}
	// Identify the method (Route) that will handle the request
	route, ok := r.detectRoute(routes, httpRequest)
	return dispatcher, route, ok
}
// detectRoute narrows the candidate Routes by HTTP method, then request
// Content-Type, then Accept header, returning the best remaining match.
// Each failed narrowing step maps to its dedicated HTTP error status.
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
	// http method
	methodOk := []Route{}
	for _, each := range routes {
		if httpRequest.Method == each.Method {
			methodOk = append(methodOk, each)
		}
	}
	if len(methodOk) == 0 {
		if trace {
			traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
		}
		return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
	}
	// content-type
	// (the previous dead assignment of methodOk to inputMediaOk was removed;
	// the slice was unconditionally replaced on the next line)
	contentType := httpRequest.Header.Get(HEADER_ContentType)
	inputMediaOk := []Route{}
	for _, each := range methodOk {
		if each.matchesContentType(contentType) {
			inputMediaOk = append(inputMediaOk, each)
		}
	}
	if len(inputMediaOk) == 0 {
		if trace {
			traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
		}
		return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
	}
	// accept; an absent header is treated as "accept anything"
	outputMediaOk := []Route{}
	accept := httpRequest.Header.Get(HEADER_Accept)
	if accept == "" {
		accept = "*/*"
	}
	for _, each := range inputMediaOk {
		if each.matchesAccept(accept) {
			outputMediaOk = append(outputMediaOk, each)
		}
	}
	if len(outputMediaOk) == 0 {
		if trace {
			traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
		}
		return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
	}
	return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
}
// bestMatchByMedia should pick the most specific media match following
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// n/m > n/* > */*
// NOTE(review): specificity ordering is not implemented (TODO below); the
// first candidate is returned regardless of contentType and accept.
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
	// TODO
	return &routes[0]
}
// selectRoutes returns the Routes of the dispatcher whose path expression
// matches the remaining request path, ordered best-first.
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
	filtered := &sortableRouteCandidates{}
	for _, each := range dispatcher.Routes() {
		pathExpr := each.pathExpr
		matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
		if matches != nil {
			lastMatch := matches[len(matches)-1]
			if len(lastMatch) == 0 || lastMatch == "/" { // include only if the remainder after the match is empty or "/"
				filtered.candidates = append(filtered.candidates,
					routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
			}
		}
	}
	if len(filtered.candidates) == 0 {
		if trace {
			traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
		}
		return []Route{}
	}
	// sort descending: the best candidate comes first
	sort.Sort(sort.Reverse(filtered))

	// select other routes from candidates whose expression matches the remainder
	matchingRoutes := []Route{filtered.candidates[0].route}
	for c := 1; c < len(filtered.candidates); c++ {
		each := filtered.candidates[c]
		if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
			matchingRoutes = append(matchingRoutes, each.route)
		}
	}
	return matchingRoutes
}
// detectDispatcher returns the best-matching WebService for the request path
// together with the unmatched remainder of the path ("final match").
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
	filtered := &sortableDispatcherCandidates{}
	for _, each := range dispatchers {
		matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
		if matches != nil {
			filtered.candidates = append(filtered.candidates,
				dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
		}
	}
	if len(filtered.candidates) == 0 {
		if trace {
			traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
		}
		return nil, "", errors.New("not found")
	}
	// sort descending: the best candidate comes first
	sort.Sort(sort.Reverse(filtered))
	return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
}
// Types and functions to support the sorting of Routes

// routeCandidate pairs a Route with the metrics used to rank it.
type routeCandidate struct {
	route           Route
	matchesCount    int // the number of capturing groups
	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^  /]+?))
}

// expressionToMatch returns the regular expression source of the route's path.
func (r routeCandidate) expressionToMatch() string {
	return r.route.pathExpr.Source
}

// String renders the ranking metrics; used for debugging.
func (r routeCandidate) String() string {
	return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
}
// sortableRouteCandidates implements sort.Interface over routeCandidate
// values, ordering ascending by literal count, then capturing-group count,
// then non-default expression count, and finally by route path.
type sortableRouteCandidates struct {
	candidates []routeCandidate
}

func (rcs *sortableRouteCandidates) Len() int { return len(rcs.candidates) }

func (rcs *sortableRouteCandidates) Swap(i, j int) {
	rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
}

func (rcs *sortableRouteCandidates) Less(i, j int) bool {
	a, b := rcs.candidates[i], rcs.candidates[j]
	// primary key: literal character count
	if a.literalCount != b.literalCount {
		return a.literalCount < b.literalCount
	}
	// secondary key: capturing group count
	if a.matchesCount != b.matchesCount {
		return a.matchesCount < b.matchesCount
	}
	// tertiary key: non-default expression count
	if a.nonDefaultCount != b.nonDefaultCount {
		return a.nonDefaultCount < b.nonDefaultCount
	}
	// quaternary key: the route path itself
	return a.route.Path < b.route.Path
}
// Types and functions to support the sorting of Dispatchers

// dispatcherCandidate pairs a WebService with the metrics used to rank it
// and the unmatched remainder of the request path.
type dispatcherCandidate struct {
	dispatcher      *WebService
	finalMatch      string
	matchesCount    int // the number of capturing groups
	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^  /]+?))
}
// sortableDispatcherCandidates implements sort.Interface over
// dispatcherCandidate values, ordering ascending by capturing-group count,
// then literal count, then non-default expression count.
type sortableDispatcherCandidates struct {
	candidates []dispatcherCandidate
}

func (dc *sortableDispatcherCandidates) Len() int { return len(dc.candidates) }

func (dc *sortableDispatcherCandidates) Swap(i, j int) {
	dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
}

func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
	a, b := dc.candidates[i], dc.candidates[j]
	// primary key: capturing group count
	if a.matchesCount != b.matchesCount {
		return a.matchesCount < b.matchesCount
	}
	// secondary key: literal character count
	if a.literalCount != b.literalCount {
		return a.literalCount < b.literalCount
	}
	// tertiary key: non-default expression count
	return a.nonDefaultCount < b.nonDefaultCount
}

31
vendor/github.com/emicklei/go-restful/log/log.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
package log
import (
stdlog "log"
"os"
)
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger.
type StdLogger interface {
	Print(v ...interface{})
	Printf(format string, v ...interface{})
}

// Logger is the package-wide destination for log output.
var Logger StdLogger

func init() {
	// Install the default logger: stderr with standard flags plus file:line.
	SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}

// SetLogger replaces the package logger.
func SetLogger(customLogger StdLogger) { Logger = customLogger }

// Print forwards to the package logger.
func Print(v ...interface{}) { Logger.Print(v...) }

// Printf forwards to the package logger.
func Printf(format string, v ...interface{}) { Logger.Printf(format, v...) }

32
vendor/github.com/emicklei/go-restful/logger.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package restful
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"github.com/emicklei/go-restful/log"
)
// trace controls whether detailed Http request matching and filter invocation
// logging is emitted (zero value false; the redundant "= false" was removed).
var trace bool

// traceLogger receives the trace output when tracing is enabled.
var traceLogger log.StdLogger

func init() {
	traceLogger = log.Logger // use the package logger by default
}
// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set.
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
// Passing nil disables tracing again.
func TraceLogger(logger log.StdLogger) {
	traceLogger = logger
	EnableTracing(logger != nil)
}
// SetLogger exposes the setter for the global logger on the top-level package;
// it delegates to the log sub-package.
func SetLogger(customLogger log.StdLogger) {
	log.SetLogger(customLogger)
}

// EnableTracing toggles trace logging on and off.
func EnableTracing(enabled bool) {
	trace = enabled
}

45
vendor/github.com/emicklei/go-restful/mime.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package restful
import (
"strconv"
"strings"
)
// mime holds a media type and its quality factor as parsed from an Accept header.
type mime struct {
	media   string
	quality float64
}

// insertMime adds a mime to a list and keeps it sorted by quality (descending).
// Entries of equal quality keep their insertion order.
func insertMime(l []mime, e mime) []mime {
	// find the first entry with a strictly lower quality
	insertAt := len(l)
	for i, existing := range l {
		if e.quality > existing.quality {
			insertAt = i
			break
		}
	}
	result := make([]mime, 0, len(l)+1)
	result = append(result, l[:insertAt]...)
	result = append(result, e)
	result = append(result, l[insertAt:]...)
	return result
}
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
// Entries without a q-factor get quality 1.0; unparsable q-factors are
// logged and skipped.
func sortedMimes(accept string) (sorted []mime) {
	for _, spec := range strings.Split(accept, ",") {
		parts := strings.Split(strings.Trim(spec, " "), ";")
		if len(parts) == 1 {
			// no parameters: full quality
			sorted = insertMime(sorted, mime{parts[0], 1.0})
			continue
		}
		// take the quality factor from the first parameter
		kv := strings.Split(parts[1], "=")
		if len(kv) != 2 {
			continue
		}
		q, err := strconv.ParseFloat(kv[1], 64)
		if err != nil {
			traceLogger.Printf("unable to parse quality in %s, %v", spec, err)
			continue
		}
		sorted = insertMime(sorted, mime{parts[0], q})
	}
	return
}

View File

@ -0,0 +1,26 @@
package restful
import "strings"
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// As for any filter, you can also install it for a particular WebService within a Container.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
	// Non-OPTIONS requests pass straight through to the rest of the chain.
	// (rewrote the Yoda condition `"OPTIONS" != method` to idiomatic order)
	if req.Request.Method != "OPTIONS" {
		chain.ProcessFilter(req, resp)
		return
	}
	// Answer the OPTIONS request with the methods allowed for this path.
	resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
}
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// It delegates to the same-named method on the DefaultContainer.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
	return DefaultContainer.OPTIONSFilter
}

114
vendor/github.com/emicklei/go-restful/parameter.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// Parameter kind indicators; stored in ParameterData.Kind.
const (
	// PathParameterKind = indicator of Request parameter type "path"
	PathParameterKind = iota

	// QueryParameterKind = indicator of Request parameter type "query"
	QueryParameterKind

	// BodyParameterKind = indicator of Request parameter type "body"
	BodyParameterKind

	// HeaderParameterKind = indicator of Request parameter type "header"
	HeaderParameterKind

	// FormParameterKind = indicator of Request parameter type "form"
	FormParameterKind
)
// Parameter is for documenting the parameter used in a Http Request
// ParameterData kinds are Path,Query and Body
type Parameter struct {
	data *ParameterData
}

// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
	Name, Description, DataType, DataFormat string
	Kind                                    int
	Required                                bool
	AllowableValues                         map[string]string
	AllowMultiple                           bool
	DefaultValue                            string
}
// Data returns the state of the Parameter (a copy, not the internal pointer).
func (p *Parameter) Data() ParameterData {
	return *p.data
}

// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
	return p.data.Kind
}

// bePath marks the parameter as a path parameter; returns the receiver for chaining.
func (p *Parameter) bePath() *Parameter {
	p.data.Kind = PathParameterKind
	return p
}

// beQuery marks the parameter as a query parameter; returns the receiver for chaining.
func (p *Parameter) beQuery() *Parameter {
	p.data.Kind = QueryParameterKind
	return p
}

// beBody marks the parameter as a body parameter; returns the receiver for chaining.
func (p *Parameter) beBody() *Parameter {
	p.data.Kind = BodyParameterKind
	return p
}

// beHeader marks the parameter as a header parameter; returns the receiver for chaining.
func (p *Parameter) beHeader() *Parameter {
	p.data.Kind = HeaderParameterKind
	return p
}

// beForm marks the parameter as a form parameter; returns the receiver for chaining.
func (p *Parameter) beForm() *Parameter {
	p.data.Kind = FormParameterKind
	return p
}
// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
	p.data.Required = required
	return p
}

// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
	p.data.AllowMultiple = multiple
	return p
}

// AllowableValues sets the allowableValues field and returns the receiver
// (the map is stored as-is, not copied).
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
	p.data.AllowableValues = values
	return p
}

// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
	p.data.DataType = typeName
	return p
}

// DataFormat sets the dataFormat field for Swagger UI and returns the receiver.
func (p *Parameter) DataFormat(formatName string) *Parameter {
	p.data.DataFormat = formatName
	return p
}

// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
	p.data.DefaultValue = stringRepresentation
	return p
}

// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
	p.data.Description = doc
	return p
}

View File

@ -0,0 +1,69 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// PathExpression holds a compiled path expression (RegExp) needed to match against
// Http request paths and to extract path parameter values.
type pathExpression struct {
	LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
	VarCount     int // the number of named parameters (enclosed by {}) in the path
	Matcher      *regexp.Regexp
	Source       string // Path as defined by the RouteBuilder
	// NOTE(review): newPathExpression stores the generated regular expression
	// here, not the raw route path — confirm which is intended.
	tokens []string
}
// newPathExpression creates a pathExpression from the input URL path template.
// Returns an error if the generated regular expression does not compile.
func newPathExpression(path string) (*pathExpression, error) {
	expression, literalCount, varCount, tokens := templateToRegularExpression(path)
	compiled, err := regexp.Compile(expression)
	if err != nil {
		return nil, err
	}
	// the Source field receives the generated expression (see type note)
	return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
}
// templateToRegularExpression converts a URL path template such as
// /meetings/{id} into an anchored regular expression with one capturing
// group per template variable, and reports the literal character count,
// the variable count and the path tokens.
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
	var buffer bytes.Buffer
	buffer.WriteString("^")
	//tokens = strings.Split(template, "/")
	tokens = tokenizePath(template)
	for _, each := range tokens {
		if each == "" {
			continue
		}
		buffer.WriteString("/")
		if strings.HasPrefix(each, "{") {
			// check for regular expression in variable, e.g. {id:[0-9]+}
			colon := strings.Index(each, ":")
			if colon != -1 {
				// extract expression
				paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
				if paramExpr == "*" { // special case: match anything including slashes
					buffer.WriteString("(.*)")
				} else {
					buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
				}
			} else {
				// plain var: match a single non-empty path segment, non-greedy
				buffer.WriteString("([^/]+?)")
			}
			varCount += 1
		} else {
			literalCount += len(each)
			encoded := each // TODO URI encode
			buffer.WriteString(regexp.QuoteMeta(encoded))
		}
	}
	// allow an optional trailing remainder; captured as the last group
	return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
}

131
vendor/github.com/emicklei/go-restful/request.go generated vendored Normal file
View File

@ -0,0 +1,131 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/zlib"
"io/ioutil"
"net/http"
)
// defaultRequestContentType is the fallback MIME type used by ReadEntity
// when the request carries none (see DefaultRequestContentType).
var defaultRequestContentType string

// doCacheReadEntityBytes controls whether the request body is cached so
// ReadEntity can be called more than once (see SetCacheReadEntity).
var doCacheReadEntityBytes = true

// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
	Request           *http.Request
	bodyContent       *[]byte // to cache the request body for multiple reads of ReadEntity
	pathParameters    map[string]string
	attributes        map[string]interface{} // for storing request-scoped values
	selectedRoutePath string                 // root path + route path that matched the request, e.g. /meetings/{id}/attendees
}

// NewRequest wraps the given http.Request with empty path parameters and attributes.
func NewRequest(httpRequest *http.Request) *Request {
	r := new(Request)
	r.Request = httpRequest
	r.pathParameters = make(map[string]string)
	r.attributes = make(map[string]interface{})
	return r
}
// DefaultRequestContentType sets the fallback content type for reading entities.
// If ContentType is missing or */* is given then fall back to this type, otherwise
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// 	restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
	defaultRequestContentType = mime
}

// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it.
func SetCacheReadEntity(doCache bool) {
	doCacheReadEntityBytes = doCache
}
// PathParameter accesses the Path parameter value by its name; empty if absent.
func (r *Request) PathParameter(name string) string {
	return r.pathParameters[name]
}

// PathParameters accesses the Path parameter values (the internal map, not a copy).
func (r *Request) PathParameters() map[string]string {
	return r.pathParameters
}

// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
	return r.Request.FormValue(name)
}

// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
	err := r.Request.ParseForm()
	if err != nil {
		return "", err
	}
	return r.Request.PostFormValue(name), nil
}

// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
	return r.Request.Header.Get(name)
}
// ReadEntity reads the request body into entityPointer, decoding according to
// the request's Content-Type header (not Accept, as a previous comment said)
// and decompressing according to Content-Encoding (gzip or deflate).
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
	contentType := r.Request.Header.Get(HEADER_ContentType)
	contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)

	// OLD feature: cache the raw body so ReadEntity can be called repeatedly
	if doCacheReadEntityBytes {
		if r.bodyContent == nil {
			data, err := ioutil.ReadAll(r.Request.Body)
			if err != nil {
				return err
			}
			r.bodyContent = &data
		}
		r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
	}

	// check if the request body needs decompression
	if ENCODING_GZIP == contentEncoding {
		gzipReader := currentCompressorProvider.AcquireGzipReader()
		defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
		// NOTE(review): the error (if any) returned by Reset is ignored — confirm acceptable
		gzipReader.Reset(r.Request.Body)
		r.Request.Body = gzipReader
	} else if ENCODING_DEFLATE == contentEncoding {
		zlibReader, err := zlib.NewReader(r.Request.Body)
		if err != nil {
			return err
		}
		r.Request.Body = zlibReader
	}

	// lookup the EntityReader registered for the content type
	entityReader, ok := entityAccessRegistry.accessorAt(contentType)
	if !ok {
		return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
	}
	return entityReader.Read(r, entityPointer)
}
// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
	r.attributes[name] = value
}

// Attribute returns the value associated to the given name. Returns nil if absent.
// NOTE(review): value receiver here vs pointer receiver on SetAttribute — works
// because the map is shared by the copy, but the inconsistency is worth confirming.
func (r Request) Attribute(name string) interface{} {
	return r.attributes[name]
}

// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
func (r Request) SelectedRoutePath() string {
	return r.selectedRoutePath
}

235
vendor/github.com/emicklei/go-restful/response.go generated vendored Normal file
View File

@ -0,0 +1,235 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"net/http"
)
// DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string

// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true

// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
	http.ResponseWriter
	requestAccept string   // mime-type what the Http Request says it wants to receive
	routeProduces []string // mime-types what the Route says it can produce
	statusCode    int      // HTTP status code that has been written explicitly (if zero then net/http has written 200)
	contentLength int      // number of bytes written for the response body
	prettyPrint   bool     // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
	err           error    // err property is kept when WriteError is called
}

// NewResponse creates a new response based on a http ResponseWriter,
// with no produced mime-types and the package-wide pretty-print setting.
func NewResponse(httpWriter http.ResponseWriter) *Response {
	return &Response{
		ResponseWriter: httpWriter,
		routeProduces:  []string{},
		statusCode:     http.StatusOK,
		prettyPrint:    PrettyPrintResponses,
	}
}
// DefaultResponseContentType sets the fallback type used when Accept header matching fails.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// 	restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
	DefaultResponseMimeType = mime
}

// InternalServerError writes the StatusInternalServerError header.
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
// NOTE(review): value receiver — writes go through the embedded ResponseWriter,
// but statusCode bookkeeping on the copy is discarded; confirm intended.
func (r Response) InternalServerError() Response {
	r.WriteHeader(http.StatusInternalServerError)
	return r
}

// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
	r.prettyPrint = bePretty
}

// AddHeader is a shortcut for .Header().Add(header,value)
func (r Response) AddHeader(header string, value string) Response {
	r.Header().Add(header, value)
	return r
}

// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
	r.requestAccept = mime
}
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
	// walk the accepted types in quality order, matching against what the route produces
	sorted := sortedMimes(r.requestAccept)
	for _, eachAccept := range sorted {
		for _, eachProduce := range r.routeProduces {
			if eachProduce == eachAccept.media {
				if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
					return w, true
				}
			}
		}
		// wildcard accept: any produced type with a registered accessor wins
		if eachAccept.media == "*/*" {
			for _, each := range r.routeProduces {
				if w, ok := entityAccessRegistry.accessorAt(each); ok {
					return w, true
				}
			}
		}
	}
	// if requestAccept is empty
	writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
	if !ok {
		// if not registered then fallback to the defaults (if set)
		if DefaultResponseMimeType == MIME_JSON {
			return entityAccessRegistry.accessorAt(MIME_JSON)
		}
		if DefaultResponseMimeType == MIME_XML {
			return entityAccessRegistry.accessorAt(MIME_XML)
		}
		// Fallback to whatever the route says it can produce.
		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
		for _, each := range r.routeProduces {
			if w, ok := entityAccessRegistry.accessorAt(each); ok {
				return w, true
			}
		}
		if trace {
			traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
		}
	}
	return writer, ok
}
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
	return r.WriteHeaderAndEntity(http.StatusOK, value)
}

// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
	writer, ok := r.EntityWriter()
	if !ok {
		r.WriteHeader(http.StatusNotAcceptable)
		return nil
	}
	return writer.Write(r, status, value)
}
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value)
}
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value)
}
// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value)
}
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value)
}
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
return writeJSON(r, status, contentType, value)
}
// WriteError writes the http status and the error string on the response.
// The error is also remembered on the Response for retrieval via Error().
func (r *Response) WriteError(httpStatus int, err error) error {
	r.err = err
	return r.WriteErrorString(httpStatus, err.Error())
}

// WriteServiceError is a convenience method for responding with a status and a ServiceError.
// The ServiceError is marshalled using the negotiated EntityWriter (see WriteHeaderAndEntity).
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
	r.err = err
	return r.WriteHeaderAndEntity(httpStatus, err)
}

// WriteErrorString is a convenience method for an error status with the actual error.
// The reason is written as-is to the body; no Content-Type header is set here — NOTE(review):
// callers that need one must set it themselves; confirm against upstream usage.
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
	if r.err == nil {
		// if not called from WriteError
		r.err = errors.New(errorReason)
	}
	r.WriteHeader(httpStatus)
	if _, err := r.Write([]byte(errorReason)); err != nil {
		return err
	}
	return nil
}
// Flush implements http.Flusher interface, which sends any buffered data to the client.
// It is a no-op (apart from an optional trace log) if the underlying writer does not support flushing.
func (r *Response) Flush() {
	if f, ok := r.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	} else if trace {
		traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
	}
}

// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
	r.statusCode = httpStatus
	r.ResponseWriter.WriteHeader(httpStatus)
}

// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
	if 0 == r.statusCode {
		// no status code has been written yet; assume OK
		return http.StatusOK
	}
	return r.statusCode
}

// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
// The number of bytes written is accumulated for ContentLength().
func (r *Response) Write(bytes []byte) (int, error) {
	written, err := r.ResponseWriter.Write(bytes)
	r.contentLength += written
	return written, err
}

// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
	return r.contentLength
}

// CloseNotify is part of http.CloseNotifier interface.
// It panics if the underlying ResponseWriter does not implement http.CloseNotifier.
func (r Response) CloseNotify() <-chan bool {
	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Error returns the err created by WriteError (or WriteErrorString/WriteServiceError).
func (r Response) Error() error {
	return r.err
}

183
vendor/github.com/emicklei/go-restful/route.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"net/http"
"strings"
)
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)

// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
	Method   string          // HTTP method to match, e.g. "GET"
	Produces []string        // MIME types this route can write
	Consumes []string        // MIME types this route can read
	Path     string          // webservice root path + described path
	Function RouteFunction   // handler invoked on a match
	Filters  []FilterFunction // per-route filter chain, run before Function

	// cached values for dispatching
	relativePath string
	pathParts    []string
	pathExpr     *pathExpression // cached compilation of relativePath as RegExp

	// documentation
	Doc                     string
	Notes                   string
	Operation               string
	ParameterDocs           []*Parameter
	ResponseErrors          map[int]ResponseError
	ReadSample, WriteSample interface{} // structs that model an example request or response payload
}
// postBuild initializes the cached dispatch data for a Route (called after RouteBuilder.Build).
func (r *Route) postBuild() {
	r.pathParts = tokenizePath(r.Path)
}

// wrapRequestResponse creates a Request and Response from their http versions,
// pre-filling path parameters, the selected route path, the request's Accept
// header and the route's Produces list.
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
	params := r.extractParameters(httpRequest.URL.Path)
	wrappedRequest := NewRequest(httpRequest)
	wrappedRequest.pathParameters = params
	wrappedRequest.selectedRoutePath = r.Path
	wrappedResponse := NewResponse(httpWriter)
	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	wrappedResponse.routeProduces = r.Produces
	return wrappedRequest, wrappedResponse
}

// dispatchWithFilters calls the route function after passing through its own filters (if any).
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
	if len(r.Filters) > 0 {
		chain := FilterChain{Filters: r.Filters, Target: r.Function}
		chain.ProcessFilter(wrappedRequest, wrappedResponse)
	} else {
		// unfiltered
		r.Function(wrappedRequest, wrappedResponse)
	}
}
// matchesAccept reports whether the Accept header value (a comma separated
// list of media types, possibly carrying quality parameters) matches what
// this Route can produce. Quality parameters are ignored.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
	for _, candidate := range strings.Split(mimeTypesWithQuality, ",") {
		mediaType := candidate
		// strip any parameters such as ";q=0.8"
		if semi := strings.Index(candidate, ";"); semi != -1 {
			mediaType = candidate[:semi]
		}
		// trim before compare
		mediaType = strings.Trim(mediaType, " ")
		if mediaType == "*/*" {
			return true
		}
		for _, producibleType := range r.Produces {
			if producibleType == "*/*" || producibleType == mediaType {
				return true
			}
		}
	}
	return false
}
// matchesContentType reports whether this Route can consume content with a
// type specified by mimeTypes (which may be empty).
func (r Route) matchesContentType(mimeTypes string) bool {
	if len(r.Consumes) == 0 {
		// did not specify what it can consume ; any media type ("*/*") is assumed
		return true
	}
	if len(mimeTypes) == 0 {
		// idempotent methods with (most likely or guaranteed) empty content match missing Content-Type
		switch r.Method {
		case "GET", "HEAD", "OPTIONS", "DELETE", "TRACE":
			return true
		}
		// proceed with the default media type
		mimeTypes = MIME_OCTET
	}
	for _, candidate := range strings.Split(mimeTypes, ",") {
		contentType := candidate
		// strip any parameters such as ";charset=utf-8"
		if semi := strings.Index(candidate, ";"); semi != -1 {
			contentType = candidate[:semi]
		}
		// trim before compare
		contentType = strings.Trim(contentType, " ")
		for _, consumableType := range r.Consumes {
			if consumableType == "*/*" || consumableType == contentType {
				return true
			}
		}
	}
	return false
}
// extractParameters extracts the path parameters from the request url path by
// aligning its tokens with the route's cached pathParts. Missing trailing
// tokens yield empty string values.
func (r Route) extractParameters(urlPath string) map[string]string {
	urlParts := tokenizePath(urlPath)
	pathParameters := map[string]string{}
	for i, key := range r.pathParts {
		var value string
		if i >= len(urlParts) {
			value = ""
		} else {
			value = urlParts[i]
		}
		if strings.HasPrefix(key, "{") { // path-parameter
			if colon := strings.Index(key, ":"); colon != -1 {
				// extract by regex ; the part after the colon is the pattern
				regPart := key[colon+1 : len(key)-1]
				keyPart := key[1:colon]
				if regPart == "*" {
					// a "{name:*}" part swallows the remainder of the url path
					pathParameters[keyPart] = untokenizePath(i, urlParts)
					break
				} else {
					pathParameters[keyPart] = value
				}
			} else {
				// plain "{name}" parameter ; strip the enclosing {}
				pathParameters[key[1:len(key)-1]] = value
			}
		}
	}
	return pathParameters
}
// untokenizePath joins parts[offset:] back into an URL path using the slash separator.
// An offset beyond the available parts yields the empty string.
func untokenizePath(offset int, parts []string) string {
	if offset >= len(parts) {
		return ""
	}
	return strings.Join(parts[offset:], "/")
}
// tokenizePath splits an URL path on the slash separator ; the result has no empty tokens
// for leading/trailing slashes, and the root path "/" yields an empty slice.
func tokenizePath(path string) []string {
	switch path {
	case "/":
		// root has no tokens
		return []string{}
	default:
		return strings.Split(strings.Trim(path, "/"), "/")
	}
}
// String returns a short "METHOD path" description of the route, for debugging.
func (r Route) String() string {
	return strings.Join([]string{r.Method, r.Path}, " ")
}

240
vendor/github.com/emicklei/go-restful/route_builder.go generated vendored Normal file
View File

@ -0,0 +1,240 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"os"
"reflect"
"runtime"
"strings"
"github.com/emicklei/go-restful/log"
)
// RouteBuilder is a helper to construct Routes using a fluent interface.
// Call Build() to produce the resulting Route.
type RouteBuilder struct {
	rootPath    string           // the enclosing WebService root path
	currentPath string           // relative path set via Path()
	produces    []string
	consumes    []string
	httpMethod  string           // required
	function    RouteFunction    // required
	filters     []FilterFunction

	// documentation
	typeNameHandler         string // placeholder comment removed; fields below carry the docs
	doc                     string
	notes                   string
	operation               string
	readSample, writeSample interface{}
	parameters              []*Parameter
	errorMap                map[int]ResponseError
}
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// 	ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// 	func Returns500(b *RouteBuilder) {
// 		b.Returns(500, "Internal Server Error", restful.ServiceError{})
// 	}
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
	for _, each := range oneArgBlocks {
		each(b)
	}
	return b
}

// To bind the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
	b.function = function
	return b
}

// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
	b.httpMethod = method
	return b
}

// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
	b.produces = mimeTypes
	return b
}

// Consumes specifies what MIME types can be consumed ; the Accept Http header must match any of these.
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
	b.consumes = mimeTypes
	return b
}

// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
	b.currentPath = subPath
	return b
}

// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
	b.doc = documentation
	return b
}

// Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
	b.notes = notes
	return b
}
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
	b.readSample = sample
	typeAsName := reflect.TypeOf(sample).String()
	bodyParameter := &Parameter{&ParameterData{Name: "body"}}
	bodyParameter.beBody()
	bodyParameter.Required(true)
	bodyParameter.DataType(typeAsName)
	b.Param(bodyParameter)
	return b
}
// ParameterNamed returns a Parameter already known to the RouteBuilder, or nil if absent.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) *Parameter {
	for _, candidate := range b.parameters {
		if candidate.Data().Name == name {
			return candidate
		}
	}
	return nil
}
// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
	b.writeSample = sample
	return b
}

// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
	if b.parameters == nil {
		b.parameters = []*Parameter{}
	}
	b.parameters = append(b.parameters, parameter)
	return b
}

// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
	b.operation = name
	return b
}

// ReturnsError is deprecated, use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
	log.Print("ReturnsError is deprecated, use Returns instead.")
	return b.Returns(code, message, model)
}
// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
	err := ResponseError{
		Code:    code,
		Message: message,
		Model:   model,
	}
	// lazy init because there is no NewRouteBuilder (yet)
	if b.errorMap == nil {
		b.errorMap = map[int]ResponseError{}
	}
	b.errorMap[code] = err
	return b
}

// ResponseError documents one possible response of a Route: its status code,
// a human readable message and an optional sample model.
type ResponseError struct {
	Code    int
	Message string
	Model   interface{}
}
// servicePath records the root path of the enclosing WebService.
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
	b.rootPath = path
	return b
}

// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
	b.filters = append(b.filters, filter)
	return b
}

// copyDefaults fills in WebService-level defaults:
// If no specific Produces then set to rootProduces.
// If no specific Consumes then set to rootConsumes.
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
	if len(b.produces) == 0 {
		b.produces = rootProduces
	}
	if len(b.consumes) == 0 {
		b.consumes = rootConsumes
	}
}
// Build creates a new Route using the specification details collected by the RouteBuilder.
// A missing function or an invalid path is a programming error and terminates the process,
// matching the original behavior.
func (b *RouteBuilder) Build() Route {
	pathExpr, err := newPathExpression(b.currentPath)
	if err != nil {
		log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
		os.Exit(1)
	}
	if b.function == nil {
		// use a %s verb; concatenating the path into the format string would let
		// any '%' in the path be misinterpreted as a formatting directive
		log.Printf("[restful] No function specified for route:%s", b.currentPath)
		os.Exit(1)
	}
	operationName := b.operation
	if len(operationName) == 0 && b.function != nil {
		// extract from definition
		operationName = nameOfFunction(b.function)
	}
	route := Route{
		Method:         b.httpMethod,
		Path:           concatPath(b.rootPath, b.currentPath),
		Produces:       b.produces,
		Consumes:       b.consumes,
		Function:       b.function,
		Filters:        b.filters,
		relativePath:   b.currentPath,
		pathExpr:       pathExpr,
		Doc:            b.doc,
		Notes:          b.notes,
		Operation:      operationName,
		ParameterDocs:  b.parameters,
		ResponseErrors: b.errorMap,
		ReadSample:     b.readSample,
		WriteSample:    b.writeSample}
	route.postBuild()
	return route
}
// concatPath joins two path segments with exactly one slash between them.
func concatPath(path1, path2 string) string {
	left := strings.TrimRight(path1, "/")
	right := strings.TrimLeft(path2, "/")
	return left + "/" + right
}
// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
tokenized := strings.Split(fun.Name(), ".")
last := tokenized[len(tokenized)-1]
last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
return last
}

18
vendor/github.com/emicklei/go-restful/router.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "net/http"
// A RouteSelector finds the best matching Route given the input HTTP Request.
type RouteSelector interface {

	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
	// It returns a selected Route and its containing WebService or an error indicating
	// a problem.
	SelectRoute(
		webServices []*WebService,
		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
}

23
vendor/github.com/emicklei/go-restful/service_error.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "fmt"
// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request.
type ServiceError struct {
	Code    int
	Message string
}

// NewError returns a ServiceError using the code and reason.
func NewError(code int, message string) ServiceError {
	return ServiceError{Code: code, Message: message}
}

// Error returns a text representation of the service error.
func (s ServiceError) Error() string {
	return fmt.Sprintf("[ServiceError:%d] %s", s.Code, s.Message)
}

View File

@ -0,0 +1,43 @@
Change history of swagger
=
2015-10-16
- add type override mechanism for swagger models (MR 254, nathanejohnson)
- replace uses of wildcard in generated apidocs (issue 251)
2015-05-25
- (api break) changed the type of Properties in Model
- (api break) changed the type of Models in ApiDeclaration
- (api break) changed the parameter type of PostBuildDeclarationMapFunc
2015-04-09
- add ModelBuildable interface for customization of Model
2015-03-17
- preserve order of Routes per WebService in Swagger listing
- fix use of $ref and type in Swagger models
- add api version to listing
2014-11-14
- operation parameters are now sorted using ordering path,query,form,header,body
2014-11-12
- respect omitempty tag value for embedded structs
- expose ApiVersion of WebService to Swagger ApiDeclaration
2014-05-29
- (api add) Ability to define custom http.Handler to serve swagger-ui static files
2014-05-04
- (fix) include model for array element type of response
2014-01-03
- (fix) do not add primitive type to the Api models
2013-11-27
- (fix) make Swagger work for WebServices with root ("/" or "") paths
2013-10-29
- (api add) package variable LogInfo to customize logging function
2013-10-15
- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition)

View File

@ -0,0 +1,76 @@
How to use Swagger UI with go-restful
=
Get the Swagger UI sources (version 1.2 only)
git clone https://github.com/wordnik/swagger-ui.git
The project contains a "dist" folder.
Its contents have all the Swagger UI files you need.
The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json`
Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
config := swagger.Config{
WebServices: restful.RegisteredWebServices(),
ApiPath: "/apidocs.json",
SwaggerPath: "/apidocs/",
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
swagger.InstallSwaggerService(config)
Documenting Structs
--
Currently there are 2 ways to document your structs in the go-restful Swagger.
###### By using struct tags
- Use tag "description" to annotate a struct field with a description to show in the UI
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
###### By using the SwaggerDoc method
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
type Address struct {
Country string `json:"country,omitempty"`
PostCode int `json:"postcode,omitempty"`
}
func (Address) SwaggerDoc() map[string]string {
return map[string]string{
"": "Address doc",
"country": "Country doc",
"postcode": "PostCode doc",
}
}
This example will generate a JSON like this
{
"Address": {
"id": "Address",
"description": "Address doc",
"properties": {
"country": {
"type": "string",
"description": "Country doc"
},
"postcode": {
"type": "integer",
"format": "int32",
"description": "PostCode doc"
}
}
}
}
**Very Important Notes:**
- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`)
Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.

View File

@ -0,0 +1,64 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// ApiDeclarationList maintains an ordered list of ApiDeclaration,
// keyed by their ResourcePath (see At and Put).
type ApiDeclarationList struct {
	List []ApiDeclaration
}
// At returns the ApiDeclaration with the given resource path; ok is false if absent.
func (l *ApiDeclarationList) At(path string) (ApiDeclaration, bool) {
	for i := range l.List {
		if l.List[i].ResourcePath == path {
			return l.List[i], true
		}
	}
	var none ApiDeclaration
	return none, false
}
// Put adds or replaces an ApiDeclaration for this resource path, preserving order.
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
	// maybe replace existing
	for i := range l.List {
		if l.List[i].ResourcePath == path {
			l.List[i] = a
			return
		}
	}
	// not present yet; append
	l.List = append(l.List, a)
}
// Do enumerates all declarations in order, each with its resource path.
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
	for i := range l.List {
		block(l.List[i].ResourcePath, l.List[i])
	}
}
// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration,
// keyed by ResourcePath and preserving list order.
// NOTE(review): ResourcePath is emitted without JSON string escaping — assumed to
// contain only plain path characters; confirm against callers.
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.ResourcePath)
		buf.WriteString("\": ")
		// the original ignored Encode errors, which could yield silently truncated JSON
		if err := encoder.Encode(each); err != nil {
			return nil, err
		}
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}

View File

@ -0,0 +1,34 @@
package swagger
import (
"net/http"
"github.com/emicklei/go-restful"
)
// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)

// Config holds the settings for installing the Swagger service (see InstallSwaggerService).
type Config struct {
	// url where the services are available, e.g. http://localhost:8080
	// if left empty then the basePath of Swagger is taken from the actual request
	WebServicesUrl string
	// path where the JSON api is available , e.g. /apidocs
	ApiPath string
	// [optional] path where the swagger UI will be served, e.g. /swagger
	SwaggerPath string
	// [optional] location of folder containing Swagger HTML5 application index.html
	SwaggerFilePath string
	// api listing is constructed from this list of restful WebServices.
	WebServices []*restful.WebService
	// will serve all static content (scripts,pages,images)
	StaticHandler http.Handler
	// [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled.
	DisableCORS bool
	// Top-level API version. Is reflected in the resource listing.
	ApiVersion string
	// If set then call this handler after building the complete ApiDeclaration Map
	PostBuildHandler PostBuildDeclarationMapFunc
	// Swagger global info struct
	Info Info
}

View File

@ -0,0 +1,430 @@
package swagger
import (
"encoding/json"
"reflect"
"strings"
)
// ModelBuildable is used for extending Structs that need more control over
// how the Model appears in the Swagger api declaration.
type ModelBuildable interface {
	PostBuildModel(m *Model) *Model
}

// modelBuilder accumulates swagger Models while walking Go types via reflection.
type modelBuilder struct {
	Models *ModelList
}

// documentable is implemented by types that provide their own field documentation.
type documentable interface {
	SwaggerDoc() map[string]string
}
// getDocFromMethodSwaggerDoc2 checks whether the type has a method with
// signature func (<theModel>) SwaggerDoc() map[string]string.
// If it exists, retrieve the documentation (used to overwrite all struct tag
// descriptions); otherwise return an empty map.
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
	zeroValue := reflect.New(model).Elem().Interface()
	if docable, ok := zeroValue.(documentable); ok {
		return docable.SwaggerDoc()
	}
	return map[string]string{}
}
// addModelFrom creates and adds a Model to the builder and detects and calls
// the post build hook for customizations.
func (b modelBuilder) addModelFrom(sample interface{}) {
	if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
		// allow customizations via the ModelBuildable hook
		if buildable, ok := sample.(ModelBuildable); ok {
			modelOrNil = buildable.PostBuildModel(modelOrNil)
			b.Models.Put(modelOrNil.Id, *modelOrNil)
		}
	}
}
// addModel reflects over st and registers a swagger Model for it (and,
// recursively, for its field types). nameOverride, when non-empty, replaces
// the derived model name. Returns nil for primitive or already-visited types,
// otherwise a pointer to the (possibly still-filling) Model.
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
	modelName := b.keyFrom(st)
	if nameOverride != "" {
		modelName = nameOverride
	}
	// no models needed for primitive types
	if b.isPrimitiveType(modelName) {
		return nil
	}
	// see if we already have visited this model
	if _, ok := b.Models.At(modelName); ok {
		return nil
	}
	sm := Model{
		Id:         modelName,
		Required:   []string{},
		Properties: ModelPropertyList{}}
	// reference the model before further initializing (enables recursive structs)
	b.Models.Put(modelName, sm)
	// check for slice or array ; only the element type needs a model
	if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
		b.addModel(st.Elem(), "")
		return &sm
	}
	// check for structure or primitive type
	if st.Kind() != reflect.Struct {
		return &sm
	}
	fullDoc := getDocFromMethodSwaggerDoc2(st)
	modelDescriptions := []string{}
	for i := 0; i < st.NumField(); i++ {
		field := st.Field(i)
		jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
		if len(modelDescription) > 0 {
			modelDescriptions = append(modelDescriptions, modelDescription)
		}
		// add if not omitted (an empty jsonName signals skip)
		if len(jsonName) != 0 {
			// update description from a SwaggerDoc entry, if present
			if fieldDoc, ok := fullDoc[jsonName]; ok {
				prop.Description = fieldDoc
			}
			// update Required
			if b.isPropertyRequired(field) {
				sm.Required = append(sm.Required, jsonName)
			}
			sm.Properties.Put(jsonName, prop)
		}
	}
	// We always overwrite documentation if SwaggerDoc method exists
	// "" is special for documenting the struct itself
	if modelDoc, ok := fullDoc[""]; ok {
		sm.Description = modelDoc
	} else if len(modelDescriptions) != 0 {
		sm.Description = strings.Join(modelDescriptions, "\n")
	}
	// update model builder with completed model
	b.Models.Put(modelName, sm)
	return &sm
}
// isPropertyRequired reports whether the field must appear in the Model's
// Required list. A field is optional when its json tag carries the
// "omitempty" option; the option is recognized in any position
// (the original only inspected the first option, missing e.g. `json:"a,string,omitempty"`).
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
		// skip the name part; scan all options
		for _, option := range strings.Split(jsonTag, ",")[1:] {
			if option == "omitempty" {
				return false
			}
		}
	}
	return true
}
// buildProperty converts one struct field into a swagger ModelProperty.
// It returns the field's json name (empty to signal "skip this property"),
// any modelDescription taken from the "modelDescription" tag, and the property.
// Dispatches on the field's kind to the struct/array/pointer specific builders.
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
	jsonName = b.jsonNameOfField(field)
	if len(jsonName) == 0 {
		// empty name signals skip property
		return "", "", prop
	}

	if tag := field.Tag.Get("modelDescription"); tag != "" {
		modelDescription = tag
	}

	prop.setPropertyMetadata(field)
	if prop.Type != nil {
		// a tag-supplied type override wins; no further inspection needed
		return jsonName, modelDescription, prop
	}
	fieldType := field.Type

	// check if type is doing its own marshalling ; then document it as a string
	marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
	if fieldType.Implements(marshalerType) {
		var pType = "string"
		if prop.Type == nil {
			prop.Type = &pType
		}
		if prop.Format == "" {
			prop.Format = b.jsonSchemaFormat(fieldType.String())
		}
		return jsonName, modelDescription, prop
	}

	// check if annotation says it is a string (json ",string" option)
	if jsonTag := field.Tag.Get("json"); jsonTag != "" {
		s := strings.Split(jsonTag, ",")
		if len(s) > 1 && s[1] == "string" {
			stringt := "string"
			prop.Type = &stringt
			return jsonName, modelDescription, prop
		}
	}

	fieldKind := fieldType.Kind()
	switch {
	case fieldKind == reflect.Struct:
		jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Slice || fieldKind == reflect.Array:
		jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Ptr:
		jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
		return jsonName, modelDescription, prop
	case fieldKind == reflect.String:
		stringt := "string"
		prop.Type = &stringt
		return jsonName, modelDescription, prop
	case fieldKind == reflect.Map:
		// if it's a map, it's unstructured, and swagger 1.2 can't handle it
		objectType := "object"
		prop.Type = &objectType
		return jsonName, modelDescription, prop
	}

	if b.isPrimitiveType(fieldType.String()) {
		mapped := b.jsonSchemaType(fieldType.String())
		prop.Type = &mapped
		prop.Format = b.jsonSchemaFormat(fieldType.String())
		return jsonName, modelDescription, prop
	}

	// everything else becomes a reference to another model
	modelType := fieldType.String()
	prop.Ref = &modelType

	if fieldType.Name() == "" { // override type of anonymous structs
		nestedTypeName := modelName + "." + jsonName
		prop.Ref = &nestedTypeName
		b.addModel(fieldType, nestedTypeName)
	}
	return jsonName, modelDescription, prop
}
func hasNamedJSONTag(field reflect.StructField) bool {
parts := strings.Split(field.Tag.Get("json"), ",")
if len(parts) == 0 {
return false
}
for _, s := range parts[1:] {
if s == "inline" {
return false
}
}
return len(parts[0]) > 0
}
// buildStructTypeProperty creates the ModelProperty for a struct-kinded field.
// Anonymous structs get a synthesized nested model name; embedded structs are
// flattened into the parent model (returning an empty name to signal "skip").
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
	prop.setPropertyMetadata(field)
	// Check for type override in tag
	if prop.Type != nil {
		return jsonName, prop
	}
	fieldType := field.Type
	// check for anonymous
	if len(fieldType.Name()) == 0 {
		// anonymous ; model it under "<parent>.<jsonName>"
		anonType := model.Id + "." + jsonName
		b.addModel(fieldType, anonType)
		prop.Ref = &anonType
		return jsonName, prop
	}

	if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
		// embedded struct ; build it separately, then merge its properties into the parent
		sub := modelBuilder{new(ModelList)}
		sub.addModel(fieldType, "")
		subKey := sub.keyFrom(fieldType)
		// merge properties from sub
		subModel, _ := sub.Models.At(subKey)
		subModel.Properties.Do(func(k string, v ModelProperty) {
			model.Properties.Put(k, v)
			// if subModel says this property is required then include it
			required := false
			for _, each := range subModel.Required {
				if k == each {
					required = true
					break
				}
			}
			if required {
				model.Required = append(model.Required, k)
			}
		})
		// add all new referenced models discovered while building the embedded struct
		sub.Models.Do(func(key string, sub Model) {
			if key != subKey {
				if _, ok := b.Models.At(key); !ok {
					b.Models.Put(key, sub)
				}
			}
		})
		// empty name signals skip property
		return "", prop
	}
	// simple struct field ; reference its own model
	b.addModel(fieldType, "")
	var pType = fieldType.String()
	prop.Ref = &pType
	return jsonName, prop
}
// buildArrayTypeProperty creates the ModelProperty for a slice/array field:
// type "array" with Items describing the element type.
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
	prop.setPropertyMetadata(field)
	// a "type" tag overrides everything
	if prop.Type != nil {
		return jsonName, prop
	}
	arrayType := "array"
	prop.Type = &arrayType
	elemType := field.Type.Elem()
	elemTypeName := b.getElementTypeName(modelName, jsonName, elemType)
	prop.Items = new(Item)
	if b.isPrimitiveType(elemType.Name()) {
		jsonType := b.jsonSchemaType(elemTypeName)
		prop.Items.Type = &jsonType
		return jsonName, prop
	}
	prop.Items.Ref = &elemTypeName
	// register (or overwrite) the model for the element type,
	// dereferencing a pointer element first
	if elemType.Kind() == reflect.Ptr {
		elemType = elemType.Elem()
	}
	b.addModel(elemType, elemTypeName)
	return jsonName, prop
}
// buildPointerTypeProperty creates the ModelProperty for a pointer field.
// Pointers to slices/arrays are documented as "array"; other pointers are
// documented as the pointed-to type.
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
	prop.setPropertyMetadata(field)
	// Check for type override in tags
	if prop.Type != nil {
		return jsonName, prop
	}
	fieldType := field.Type
	// override type of pointer to list-likes
	if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
		var pType = "array"
		prop.Type = &pType
		isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
		elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
		if isPrimitive {
			// BUGFIX: a primitive element type must be emitted as a JSON-schema
			// "type" (e.g. "integer"), not as a "$ref"; this now matches
			// buildArrayTypeProperty above.
			primName := b.jsonSchemaType(elemName)
			prop.Items = &Item{Type: &primName}
		} else {
			prop.Items = &Item{Ref: &elemName}
			// add|overwrite model for element type
			b.addModel(fieldType.Elem().Elem(), elemName)
		}
	} else {
		// non-array, pointer type
		// drop the leading "*" but keep the package path for the lookup
		var pType = b.jsonSchemaType(fieldType.String()[1:])
		if b.isPrimitiveType(fieldType.String()[1:]) {
			prop.Type = &pType
			prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
			return jsonName, prop
		}
		prop.Ref = &pType
		elemName := ""
		if fieldType.Elem().Name() == "" {
			// anonymous struct: nest its model under the enclosing model
			elemName = modelName + "." + jsonName
			prop.Ref = &elemName
		}
		b.addModel(fieldType.Elem(), elemName)
	}
	return jsonName, prop
}
// getElementTypeName derives the swagger model name for an array/slice element type.
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
	switch {
	case t.Kind() == reflect.Ptr:
		return t.String()[1:] // strip the leading "*"
	case t.Name() == "":
		// anonymous type: nest under the enclosing model and field name
		return modelName + "." + jsonName
	default:
		return b.keyFrom(t)
	}
}
// keyFrom computes the model key for a type; for unnamed types "[]" is
// rewritten to "||" because Swagger UI has special meaning for "[".
func (b modelBuilder) keyFrom(st reflect.Type) string {
	if st.Name() != "" {
		return st.String()
	}
	// unnamed type
	return strings.Replace(st.String(), "[]", "||", -1)
}
// isPrimitiveType reports whether modelName maps onto a primitive
// JSON-schema type.
// see also https://golang.org/ref/spec#Numeric_types
func (b modelBuilder) isPrimitiveType(modelName string) bool {
	if len(modelName) == 0 {
		return false
	}
	// BUGFIX: the previous strings.Contains check matched substrings, so a
	// custom type named e.g. "in" or "uin" was wrongly treated as primitive.
	// Compare against each known primitive name exactly instead.
	for _, primitive := range strings.Fields("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time") {
		if modelName == primitive {
			return true
		}
	}
	return false
}
// jsonNameOfField returns the name of the field as it should appear in JSON format
// An empty string indicates that this field is not part of the JSON representation
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
	jsonTag := field.Tag.Get("json")
	if jsonTag == "" {
		return field.Name
	}
	switch name := strings.Split(jsonTag, ",")[0]; name {
	case "-":
		// empty name signals skip property
		return ""
	case "":
		// tag has options but no explicit name
		return field.Name
	default:
		return name
	}
}
// jsonSchemaType maps a Go type name onto its JSON-schema type; unknown
// (custom or struct) names are returned as-is.
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
func (b modelBuilder) jsonSchemaType(modelName string) string {
	switch modelName {
	case "uint", "uint8", "uint16", "uint32", "uint64",
		"int", "int8", "int16", "int32", "int64", "byte":
		return "integer"
	case "float64", "float32":
		return "number"
	case "bool":
		return "boolean"
	case "time.Time":
		return "string"
	default:
		return modelName // use as is (custom or struct)
	}
}
// jsonSchemaFormat maps a Go type name onto its JSON-schema "format" string;
// the empty string means no format applies.
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
	switch modelName {
	case "int", "int32":
		return "int32"
	case "int64":
		return "int64"
	case "byte", "uint8":
		return "byte"
	case "uint":
		return "integer"
	case "float64":
		return "double"
	case "float32":
		return "float"
	case "time.Time", "*time.Time":
		return "date-time"
	}
	return "" // no format
}

View File

@ -0,0 +1,86 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
	Name  string
	Model Model
}

// ModelList encapsulates a list of NamedModel (association).
// A slice is used instead of a map so insertion order is preserved.
type ModelList struct {
	List []NamedModel
}
// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
	// replace in place when the name is already present
	for i := range l.List {
		if l.List[i].Name == name {
			l.List[i] = NamedModel{name, model}
			return
		}
	}
	// otherwise append
	l.List = append(l.List, NamedModel{name, model})
}
// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
	for i := range l.List {
		if l.List[i].Name == name {
			return l.List[i].Model, true
		}
	}
	return
}
// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
	for i := range l.List {
		block(l.List[i].Name, l.List[i].Model)
	}
}
// MarshalJSON writes the ModelList as if it was a map[string]Model,
// preserving the list order of the entries.
func (l ModelList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.Name)
		buf.WriteString("\": ")
		// BUGFIX: the Encode error was silently ignored, which could yield
		// truncated/invalid JSON; propagate it instead.
		if err := encoder.Encode(each.Model); err != nil {
			return nil, err
		}
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
	raw := map[string]interface{}{}
	// BUGFIX: decode errors were silently ignored, so malformed input
	// produced an empty list instead of an error; propagate them.
	if err := json.NewDecoder(bytes.NewReader(data)).Decode(&raw); err != nil {
		return err
	}
	for k, v := range raw {
		// produces JSON bytes for each value
		data, err := json.Marshal(v)
		if err != nil {
			return err
		}
		var m Model
		if err := json.NewDecoder(bytes.NewReader(data)).Decode(&m); err != nil {
			return err
		}
		l.Put(k, m)
	}
	return nil
}

View File

@ -0,0 +1,66 @@
package swagger
import (
"reflect"
"strings"
)
// setDescription copies the "description" struct tag, when present.
func (prop *ModelProperty) setDescription(field reflect.StructField) {
	if desc := field.Tag.Get("description"); len(desc) > 0 {
		prop.Description = desc
	}
}

// setDefaultValue copies the "default" struct tag, when present.
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
	if def := field.Tag.Get("default"); len(def) > 0 {
		prop.DefaultValue = Special(def)
	}
}

// setEnumValues splits the "enum" struct tag on "|". We use | to separate
// the enum values since it is unlikely to be useful in actual enumeration
// values.
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
	if enum := field.Tag.Get("enum"); len(enum) > 0 {
		prop.Enum = strings.Split(enum, "|")
	}
}

// setMaximum copies the "maximum" struct tag, when present.
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
	if max := field.Tag.Get("maximum"); len(max) > 0 {
		prop.Maximum = max
	}
}

// setType copies the "type" struct tag, when present, overriding the
// reflected type.
func (prop *ModelProperty) setType(field reflect.StructField) {
	if t := field.Tag.Get("type"); len(t) > 0 {
		prop.Type = &t
	}
}

// setMinimum copies the "minimum" struct tag, when present.
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
	if min := field.Tag.Get("minimum"); len(min) > 0 {
		prop.Minimum = min
	}
}
// setUniqueItems interprets the "unique" struct tag; only the literal
// strings "true" and "false" have any effect.
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
	switch field.Tag.Get("unique") {
	case "true":
		unique := true
		prop.UniqueItems = &unique
	case "false":
		unique := false
		prop.UniqueItems = &unique
	}
}
// setPropertyMetadata applies every supported struct tag to the property,
// in the same order as before (setType last so it wins).
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
	for _, apply := range []func(reflect.StructField){
		prop.setDescription,
		prop.setEnumValues,
		prop.setMinimum,
		prop.setMaximum,
		prop.setUniqueItems,
		prop.setDefaultValue,
		prop.setType,
	} {
		apply(field)
	}
}

View File

@ -0,0 +1,87 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
	Name     string
	Property ModelProperty
}

// ModelPropertyList encapsulates a list of NamedModelProperty (association).
// A slice is used instead of a map so insertion order is preserved.
type ModelPropertyList struct {
	List []NamedModelProperty
}
// At returns the ModelPropety by its name unless absent, then ok is false
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
	for _, candidate := range l.List {
		if candidate.Name == name {
			return candidate.Property, true
		}
	}
	return
}
// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
	// maybe replace existing
	for i := range l.List {
		if l.List[i].Name == name {
			l.List[i].Property = prop
			return
		}
	}
	// add
	l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}
// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
	for i := range l.List {
		block(l.List[i].Name, l.List[i].Property)
	}
}
// MarshalJSON writes the ModelPropertyList as if it was a
// map[string]ModelProperty, preserving the list order of the entries.
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	buf.WriteString("{\n")
	for i, each := range l.List {
		buf.WriteString("\"")
		buf.WriteString(each.Name)
		buf.WriteString("\": ")
		// BUGFIX: the Encode error was silently ignored, which could yield
		// truncated/invalid JSON; propagate it instead.
		if err := encoder.Encode(each.Property); err != nil {
			return nil, err
		}
		if i < len(l.List)-1 {
			buf.WriteString(",\n")
		}
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
	raw := map[string]interface{}{}
	// BUGFIX: decode errors were silently ignored, so malformed input
	// produced an empty list instead of an error; propagate them.
	if err := json.NewDecoder(bytes.NewReader(data)).Decode(&raw); err != nil {
		return err
	}
	for k, v := range raw {
		// produces JSON bytes for each value
		data, err := json.Marshal(v)
		if err != nil {
			return err
		}
		var m ModelProperty
		if err := json.NewDecoder(bytes.NewReader(data)).Decode(&m); err != nil {
			return err
		}
		l.Put(k, m)
	}
	return nil
}

View File

@ -0,0 +1,36 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "github.com/emicklei/go-restful"
// orderedRouteMap groups routes by key while remembering the order in which
// keys were first added, so Do iterates deterministically.
type orderedRouteMap struct {
	elements map[string][]restful.Route
	keys     []string
}
// newOrderedRouteMap returns an empty, ready-to-use orderedRouteMap.
func newOrderedRouteMap() *orderedRouteMap {
	o := new(orderedRouteMap)
	o.elements = map[string][]restful.Route{}
	o.keys = []string{}
	return o
}
// Add appends route under key, recording the key on first use.
func (o *orderedRouteMap) Add(key string, route restful.Route) {
	if existing, ok := o.elements[key]; ok {
		o.elements[key] = append(existing, route)
		return
	}
	// first route for this key: remember the insertion order
	o.keys = append(o.keys, key)
	o.elements[key] = []restful.Route{route}
}
// Do calls block for every key (in insertion order) with its routes.
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
	for _, key := range o.keys {
		block(key, o.elements[key])
	}
}

View File

@ -0,0 +1,184 @@
// Package swagger implements the structures of the Swagger
// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md
package swagger
// swaggerVersion is the Swagger spec version this package implements.
const swaggerVersion = "1.2"

// 4.3.3 Data Type Fields
// DataTypeFields is embedded wherever the spec allows a data-type
// description; Type and Ref are mutually exclusive.
type DataTypeFields struct {
	Type         *string  `json:"type,omitempty"` // if Ref not used
	Ref          *string  `json:"$ref,omitempty"` // if Type not used
	Format       string   `json:"format,omitempty"`
	DefaultValue Special  `json:"defaultValue,omitempty"`
	Enum         []string `json:"enum,omitempty"`
	Minimum      string   `json:"minimum,omitempty"`
	Maximum      string   `json:"maximum,omitempty"`
	Items        *Item    `json:"items,omitempty"`
	UniqueItems  *bool    `json:"uniqueItems,omitempty"`
}

// Special is a string carrying a literal value (e.g. a documented default).
type Special string

// 4.3.4 Items Object
type Item struct {
	Type   *string `json:"type,omitempty"`
	Ref    *string `json:"$ref,omitempty"`
	Format string  `json:"format,omitempty"`
}
// 5.1 Resource Listing
// ResourceListing is the root document served at the configured ApiPath.
type ResourceListing struct {
	SwaggerVersion string          `json:"swaggerVersion"` // e.g 1.2
	Apis           []Resource      `json:"apis"`
	ApiVersion     string          `json:"apiVersion"`
	Info           Info            `json:"info"`
	Authorizations []Authorization `json:"authorizations,omitempty"`
}

// 5.1.2 Resource Object
type Resource struct {
	Path        string `json:"path"` // relative or absolute, must start with /
	Description string `json:"description"`
}

// 5.1.3 Info Object
type Info struct {
	Title             string `json:"title"`
	Description       string `json:"description"`
	TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
	Contact           string `json:"contact,omitempty"`
	License           string `json:"license,omitempty"`
	LicenseUrl        string `json:"licenseUrl,omitempty"`
}
// 5.1.5 Authorization Object
type Authorization struct {
	Type    string  `json:"type"`
	PassAs  string  `json:"passAs"`
	Keyname string  `json:"keyname"`
	Scopes  []Scope `json:"scopes"`
	// NOTE(review): "grandTypes" looks like a typo for "grantTypes", but the
	// key is part of the emitted wire format — confirm with consumers before
	// changing it.
	GrantTypes []GrantType `json:"grandTypes"`
}

// 5.1.6, 5.2.11
type Scope struct {
	// Required. The name of the scope.
	Scope string `json:"scope"`
	// Recommended. A short description of the scope.
	Description string `json:"description"`
}

// 5.1.7 Grant Types Object
type GrantType struct {
	Implicit          Implicit          `json:"implicit"`
	AuthorizationCode AuthorizationCode `json:"authorization_code"`
}
// 5.1.8 Implicit Object
type Implicit struct {
	// Required. The login endpoint definition.
	// BUGFIX: this field was unexported ("loginEndpoint"), which encoding/json
	// silently ignores, so the required "loginEndpoint" key was never
	// marshaled. Exporting it is backward compatible (nothing outside the
	// package could reference the unexported name).
	LoginEndpoint LoginEndpoint `json:"loginEndpoint"`
	// An optional alternative name to standard "access_token" OAuth2 parameter.
	TokenName string `json:"tokenName"`
}
// 5.1.9 Authorization Code Object
type AuthorizationCode struct {
	TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
	TokenEndpoint        TokenEndpoint        `json:"tokenEndpoint"`
}

// 5.1.10 Login Endpoint Object
type LoginEndpoint struct {
	// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
}

// 5.1.11 Token Request Endpoint Object
type TokenRequestEndpoint struct {
	// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
	// An optional alternative name to standard "client_id" OAuth2 parameter.
	ClientIdName string `json:"clientIdName"`
	// An optional alternative name to the standard "client_secret" OAuth2 parameter.
	ClientSecretName string `json:"clientSecretName"`
}

// 5.1.12 Token Endpoint Object
type TokenEndpoint struct {
	// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
	Url string `json:"url"`
	// An optional alternative name to standard "access_token" OAuth2 parameter.
	TokenName string `json:"tokenName"`
}
// 5.2 API Declaration
// ApiDeclaration documents one resource (one root path) of the service.
type ApiDeclaration struct {
	SwaggerVersion string          `json:"swaggerVersion"`
	ApiVersion     string          `json:"apiVersion"`
	BasePath       string          `json:"basePath"`
	ResourcePath   string          `json:"resourcePath"` // must start with /
	Apis           []Api           `json:"apis,omitempty"`
	Models         ModelList       `json:"models,omitempty"`
	Produces       []string        `json:"produces,omitempty"`
	Consumes       []string        `json:"consumes,omitempty"`
	Authorizations []Authorization `json:"authorizations,omitempty"`
}

// 5.2.2 API Object
type Api struct {
	Path        string      `json:"path"` // relative or absolute, must start with /
	Description string      `json:"description"`
	Operations  []Operation `json:"operations,omitempty"`
}

// 5.2.3 Operation Object
type Operation struct {
	// DataTypeFields describes the operation's return type.
	DataTypeFields
	Method           string            `json:"method"`
	Summary          string            `json:"summary,omitempty"`
	Notes            string            `json:"notes,omitempty"`
	Nickname         string            `json:"nickname"`
	Authorizations   []Authorization   `json:"authorizations,omitempty"`
	Parameters       []Parameter       `json:"parameters"`
	ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
	Produces         []string          `json:"produces,omitempty"`
	Consumes         []string          `json:"consumes,omitempty"`
	Deprecated       string            `json:"deprecated,omitempty"`
}
// 5.2.4 Parameter Object
type Parameter struct {
	// DataTypeFields describes the parameter's data type.
	DataTypeFields
	ParamType     string `json:"paramType"` // path,query,body,header,form
	Name          string `json:"name"`
	Description   string `json:"description"`
	Required      bool   `json:"required"`
	AllowMultiple bool   `json:"allowMultiple"`
}

// 5.2.5 Response Message Object
type ResponseMessage struct {
	Code          int    `json:"code"`
	Message       string `json:"message"`
	ResponseModel string `json:"responseModel,omitempty"`
}

// 5.2.6, 5.2.7 Models Object
type Model struct {
	Id            string            `json:"id"`
	Description   string            `json:"description,omitempty"`
	Required      []string          `json:"required,omitempty"`
	Properties    ModelPropertyList `json:"properties"`
	SubTypes      []string          `json:"subTypes,omitempty"`
	Discriminator string            `json:"discriminator,omitempty"`
}

// 5.2.8 Properties Object
type ModelProperty struct {
	// DataTypeFields describes the property's data type.
	DataTypeFields
	Description string `json:"description,omitempty"`
}

// 5.2.10 Authorizations maps an authorization name onto its definition.
type Authorizations map[string]Authorization

View File

@ -0,0 +1,21 @@
package swagger
// SwaggerBuilder exposes the document-producing side of SwaggerService
// without registering any HTTP routes.
type SwaggerBuilder struct {
	SwaggerService
}

// NewSwaggerBuilder returns a builder initialized from the given configuration.
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
	return &SwaggerBuilder{*newSwaggerService(config)}
}

// ProduceListing returns the Swagger resource listing for all configured services.
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
	return sb.produceListing()
}

// ProduceAllDeclarations returns every ApiDeclaration, keyed by resource path.
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
	return sb.produceAllDeclarations()
}

// ProduceDeclarations returns the ApiDeclaration for a route, plus whether it exists.
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
	return sb.produceDeclarations(route)
}

View File

@ -0,0 +1,440 @@
package swagger
import (
"fmt"
"github.com/emicklei/go-restful"
// "github.com/emicklei/hopwatch"
"net/http"
"reflect"
"sort"
"strings"
"github.com/emicklei/go-restful/log"
)
// SwaggerService builds and serves the Swagger 1.2 documentation for a set
// of restful WebServices.
type SwaggerService struct {
	config            Config              // configuration supplied by the caller
	apiDeclarationMap *ApiDeclarationList // one ApiDeclaration per resource root path
}
// newSwaggerService builds the complete set of ApiDeclarations for the
// configured WebServices and hands the result to the optional
// PostBuildHandler before returning.
func newSwaggerService(config Config) *SwaggerService {
	sws := &SwaggerService{
		config:            config,
		apiDeclarationMap: new(ApiDeclarationList)}
	// Build all ApiDeclarations
	for _, each := range config.WebServices {
		rootPath := each.RootPath()
		// skip the api service itself
		if rootPath != config.ApiPath {
			if rootPath == "" || rootPath == "/" {
				// use routes: group by each route's static path prefix,
				// building a declaration only once per prefix
				for _, route := range each.Routes() {
					entry := staticPathFromRoute(route)
					_, exists := sws.apiDeclarationMap.At(entry)
					if !exists {
						sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
					}
				}
			} else { // use root path
				sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
			}
		}
	}
	// if specified then call the PostBuilderHandler
	if config.PostBuildHandler != nil {
		config.PostBuildHandler(sws.apiDeclarationMap)
	}
	return sws
}
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
// Assign a different function to redirect this package's logging.
var LogInfo = func(format string, v ...interface{}) {
	// use the restful package-wide logger
	log.Printf(format, v...)
}
// InstallSwaggerService add the WebService that provides the API documentation of all services
// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki).
// It registers on the restful.DefaultContainer.
func InstallSwaggerService(aSwaggerConfig Config) {
	RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
}
// RegisterSwaggerService add the WebService that provides the API documentation of all services
// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki).
// It serves the resource listing at config.ApiPath, declarations for paths of
// 1..7 segments below it, and optionally the Swagger UI static files.
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
	sws := newSwaggerService(config)
	ws := new(restful.WebService)
	ws.Path(config.ApiPath)
	ws.Produces(restful.MIME_JSON)
	// NOTE(review): the CORS filter is installed when DisableCORS is TRUE,
	// which reads inverted — confirm the intended semantics of
	// Config.DisableCORS before changing it.
	if config.DisableCORS {
		ws.Filter(enableCORS)
	}
	ws.Route(ws.GET("/").To(sws.getListing))
	ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
	ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
	LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
	wsContainer.Add(ws)

	// Check paths for UI serving
	if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
		swaggerPathSlash := config.SwaggerPath
		// path must end with slash /
		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
			LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
			swaggerPathSlash += "/"
		}
		LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
		wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))

		//if we define a custom static handler use it
	} else if config.StaticHandler != nil && config.SwaggerPath != "" {
		swaggerPathSlash := config.SwaggerPath
		// path must end with slash /
		if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
			// NOTE(review): message mentions SwaggerFilePath but this branch
			// corrects SwaggerPath — confirm the intended wording.
			LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
			swaggerPathSlash += "/"
		}
		LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
		wsContainer.Handle(swaggerPathSlash, config.StaticHandler)

	} else {
		LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
	}
}
// staticPathFromRoute returns the static prefix of a route path: everything
// before the first path parameter, without a trailing slash.
func staticPathFromRoute(r restful.Route) string {
	static := r.Path
	bracket := strings.Index(static, "{")
	if bracket <= 1 {
		// no parameter (-1), or a parameter so early that trimming would
		// leave an empty result
		return static
	}
	return strings.TrimSuffix(static[:bracket], "/")
}
// enableCORS echoes the request Origin header as Access-Control-Allow-Origin
// unless that header was already set by an earlier filter.
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	origin := req.HeaderParameter(restful.HEADER_Origin)
	// prevent duplicate header
	if origin != "" && resp.Header().Get(restful.HEADER_AccessControlAllowOrigin) == "" {
		resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
	}
	chain.ProcessFilter(req, resp)
}
// getListing is the HTTP handler serving the Swagger resource listing.
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
	resp.WriteAsJson(sws.produceListing())
}
// produceListing assembles the ResourceListing from all known declarations.
func (sws SwaggerService) produceListing() ResourceListing {
	listing := ResourceListing{
		SwaggerVersion: swaggerVersion,
		ApiVersion:     sws.config.ApiVersion,
		Info:           sws.config.Info,
	}
	sws.apiDeclarationMap.Do(func(path string, decl ApiDeclaration) {
		resource := Resource{Path: path}
		// use description of first api (could still be empty)
		if len(decl.Apis) > 0 {
			resource.Description = decl.Apis[0].Description
		}
		listing.Apis = append(listing.Apis, resource)
	})
	return listing
}
// getDeclarations is the HTTP handler serving a single ApiDeclaration,
// identified by the (up to 7 segment) request path.
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
	decl, ok := sws.produceDeclarations(composeRootPath(req))
	if !ok {
		resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
		return
	}
	// unless WebServicesUrl is given
	if len(sws.config.WebServicesUrl) == 0 {
		// update base path from the actual request
		// TODO how to detect https? assume http for now
		var host string
		// X-Forwarded-Host or Host or Request.Host
		hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
		if !ok || len(hostvalues) == 0 {
			// NOTE(review): net/http promotes the Host header into
			// Request.Host and removes it from the Header map, so this
			// lookup likely never succeeds and the fallback below is taken
			// — confirm before relying on it.
			forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
			if !ok || len(forwarded) == 0 {
				// fallback to Host field
				host = req.Request.Host
			} else {
				host = forwarded[0]
			}
		} else {
			host = hostvalues[0]
		}
		// inspect Referer for the scheme (http vs https)
		scheme := "http"
		if referer := req.Request.Header["Referer"]; len(referer) > 0 {
			if strings.HasPrefix(referer[0], "https") {
				scheme = "https"
			}
		}
		decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
	}
	resp.WriteAsJson(decl)
}
// produceAllDeclarations returns a copy of every declaration, keyed by path.
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
	decls := make(map[string]ApiDeclaration)
	sws.apiDeclarationMap.Do(func(path string, decl ApiDeclaration) {
		decls[path] = decl
	})
	return decls
}
// produceDeclarations returns the declaration for route (with BasePath set
// from the configuration), or (nil, false) when absent.
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
	decl, found := sws.apiDeclarationMap.At(route)
	if !found {
		return nil, false
	}
	// decl is a copy, so this does not mutate the stored declaration
	decl.BasePath = sws.config.WebServicesUrl
	return &decl, true
}
// composeDeclaration uses all routes and parameters to create a ApiDeclaration
// for the routes of ws whose path starts with pathPrefix.
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
	decl := ApiDeclaration{
		SwaggerVersion: swaggerVersion,
		BasePath:       sws.config.WebServicesUrl,
		ResourcePath:   pathPrefix,
		Models:         ModelList{},
		ApiVersion:     ws.Version()}

	// collect any path parameters
	rootParams := []Parameter{}
	for _, param := range ws.PathParameters() {
		rootParams = append(rootParams, asSwaggerParameter(param.Data()))
	}
	// aggregate by path
	pathToRoutes := newOrderedRouteMap()
	for _, other := range ws.Routes() {
		if strings.HasPrefix(other.Path, pathPrefix) {
			pathToRoutes.Add(other.Path, other)
		}
	}
	pathToRoutes.Do(func(path string, routes []restful.Route) {
		api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
		voidString := "void"
		for _, route := range routes {
			operation := Operation{
				Method:  route.Method,
				Summary: route.Doc,
				Notes:   route.Notes,
				// Type gets overwritten if there is a write sample
				DataTypeFields:   DataTypeFields{Type: &voidString},
				Parameters:       []Parameter{},
				Nickname:         route.Operation,
				ResponseMessages: composeResponseMessages(route, &decl)}
			operation.Consumes = route.Consumes
			operation.Produces = route.Produces
			// share root params if any
			for _, swparam := range rootParams {
				operation.Parameters = append(operation.Parameters, swparam)
			}
			// route specific params
			for _, param := range route.ParameterDocs {
				operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
			}
			// register models for read/write samples and set the return type
			sws.addModelsFromRouteTo(&operation, route, &decl)
			api.Operations = append(api.Operations, operation)
		}
		decl.Apis = append(decl.Apis, api)
	})
	return decl
}
// withoutWildcard rewrites a trailing wildcard parameter "{name:*}" to "{name}".
func withoutWildcard(path string) string {
	const suffix = ":*}"
	if strings.HasSuffix(path, suffix) {
		return strings.TrimSuffix(path, suffix) + "}"
	}
	return path
}
// composeResponseMessages converts a route's ResponseErrors (if any) into
// ResponseMessages, sorted by status code; referenced models are added to decl.
func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (messages []ResponseMessage) {
	if route.ResponseErrors == nil {
		return
	}
	// deterministic output: sort the status codes
	codes := make(sort.IntSlice, 0, len(route.ResponseErrors))
	for code := range route.ResponseErrors {
		codes = append(codes, code)
	}
	codes.Sort()
	for _, code := range codes {
		respErr := route.ResponseErrors[code]
		message := ResponseMessage{
			Code:    code,
			Message: respErr.Message,
		}
		if respErr.Model != nil {
			isCollection, st := detectCollectionType(reflect.TypeOf(respErr.Model))
			modelName := modelBuilder{}.keyFrom(st)
			if isCollection {
				modelName = "array[" + modelName + "]"
			}
			modelBuilder{&decl.Models}.addModel(st, "")
			// reference the model
			message.ResponseModel = modelName
		}
		messages = append(messages, message)
	}
	return
}
// addModelsFromRouteTo takes any read or write sample from the Route and
// creates a Swagger model from it; the write sample also sets the
// operation's response type.
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
	if sample := route.ReadSample; sample != nil {
		sws.addModelFromSampleTo(operation, false, sample, &decl.Models)
	}
	if sample := route.WriteSample; sample != nil {
		sws.addModelFromSampleTo(operation, true, sample, &decl.Models)
	}
}
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
isCollection := false
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
st = st.Elem()
isCollection = true
} else {
if st.Kind() == reflect.Ptr {
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
st = st.Elem().Elem()
isCollection = true
}
}
}
return isCollection, st
}
// addModelFromSampleTo creates and adds (or overwrites) a Model from a
// sample resource; for responses it also records the operation's return type.
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
	if isResponse {
		operation.Type, operation.Items = asDataType(sample)
	}
	modelBuilder{models}.addModelFrom(sample)
}
// asSwaggerParameter converts restful parameter documentation into the
// corresponding swagger Parameter.
func asSwaggerParameter(param restful.ParameterData) Parameter {
	p := Parameter{
		Name:        param.Name,
		Description: param.Description,
		ParamType:   asParamType(param.Kind),
		Required:    param.Required,
	}
	p.DataTypeFields = DataTypeFields{
		Type:         &param.DataType,
		Format:       asFormat(param.DataType, param.DataFormat),
		DefaultValue: Special(param.DefaultValue),
	}
	return p
}
// composeRootPath reassembles the requested declaration path from the
// positional path parameters; between 1..7 segments is supported.
func composeRootPath(req *restful.Request) string {
	path := "/" + req.PathParameter("a")
	for _, name := range []string{"b", "c", "d", "e", "f", "g"} {
		segment := req.PathParameter(name)
		if segment == "" {
			// first missing segment ends the path
			return path
		}
		path += "/" + segment
	}
	return path
}
// asFormat returns the explicitly documented data format, if any.
func asFormat(dataType string, dataFormat string) string {
	if dataFormat == "" {
		return "" // TODO derive a format from the data type
	}
	return dataFormat
}
// asParamType maps a restful parameter kind onto its swagger paramType
// string; unknown kinds yield the empty string.
func asParamType(kind int) string {
	switch kind {
	case restful.PathParameterKind:
		return "path"
	case restful.QueryParameterKind:
		return "query"
	case restful.BodyParameterKind:
		return "body"
	case restful.HeaderParameterKind:
		return "header"
	case restful.FormParameterKind:
		return "form"
	default:
		return ""
	}
}
// asDataType returns the swagger type for a sample value and, for
// collections, an Item describing the element type.
func asDataType(any interface{}) (*string, *Item) {
	st := reflect.TypeOf(any)
	isCollection, st := detectCollectionType(st)
	modelName := modelBuilder{}.keyFrom(st)
	// scalars are done: the model name is the type
	if !isCollection {
		return &modelName, nil
	}
	// collections: build an Item referring to the element type
	// (XXX: not very elegant, but mirrors what the model builder produces)
	models := ModelList{}
	mb := modelBuilder{&models}
	mb.addModelFrom(any)
	elemTypeName := mb.getElementTypeName(modelName, "", st)
	item := new(Item)
	if mb.isPrimitiveType(elemTypeName) {
		jsonType := mb.jsonSchemaType(elemTypeName)
		item.Type = &jsonType
	} else {
		item.Ref = &elemTypeName
	}
	arrayType := "array"
	return &arrayType, item
}

268
vendor/github.com/emicklei/go-restful/web_service.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
package restful
import (
"fmt"
"os"
"sync"
"github.com/emicklei/go-restful/log"
)
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
type WebService struct {
	rootPath       string
	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
	routes         []Route
	produces       []string
	consumes       []string
	pathParameters []*Parameter // parameters documented for the root path (see Param)
	filters        []FilterFunction
	documentation  string
	apiVersion     string
	dynamicRoutes  bool // see SetDynamicRoutes
	// protects 'routes' if dynamic routes are enabled
	routesLock sync.RWMutex
}
// SetDynamicRoutes enables or disables dynamic route support; when enabled,
// access to 'routes' is guarded by routesLock.
func (w *WebService) SetDynamicRoutes(enable bool) {
	w.dynamicRoutes = enable
}
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
// An invalid root path is fatal: it is logged and the process exits.
func (w *WebService) compilePathExpression() {
	expression, err := newPathExpression(w.rootPath)
	if err != nil {
		log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
		os.Exit(1)
	}
	w.pathExpr = expression
}
// ApiVersion sets the API version for documentation purposes.
// It returns the receiver to allow chaining.
func (w *WebService) ApiVersion(apiVersion string) *WebService {
	w.apiVersion = apiVersion
	return w
}

// Version returns the API version for documentation purposes.
func (w WebService) Version() string { return w.apiVersion }
// Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService {
	if root == "" {
		root = "/"
	}
	w.rootPath = root
	w.compilePathExpression()
	return w
}
// Param adds a PathParameter to document parameters used in the root path.
func (w *WebService) Param(parameter *Parameter) *WebService {
	// append handles a nil slice, so no explicit initialization is needed
	w.pathParameters = append(w.pathParameters, parameter)
	return w
}
// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) PathParameter(name, description string) *Parameter {
	return PathParameter(name, description)
}

// PathParameter creates a new Parameter of kind Path for documentation purposes.
// It is initialized as required with string as its DataType.
func PathParameter(name, description string) *Parameter {
	data := &ParameterData{Name: name, Description: description, Required: true, DataType: "string"}
	parameter := &Parameter{data}
	parameter.bePath()
	return parameter
}
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) QueryParameter(name, description string) *Parameter {
return QueryParameter(name, description)
}
// QueryParameter creates a new Parameter of kind Query for documentation purposes.
// It is initialized as not required with string as its DataType.
func QueryParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beQuery()
return p
}
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func (w *WebService) BodyParameter(name, description string) *Parameter {
return BodyParameter(name, description)
}
// BodyParameter creates a new Parameter of kind Body for documentation purposes.
// It is initialized as required without a DataType.
func BodyParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
p.beBody()
return p
}
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func (w *WebService) HeaderParameter(name, description string) *Parameter {
return HeaderParameter(name, description)
}
// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
// It is initialized as not required with string as its DataType.
func HeaderParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beHeader()
return p
}
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func (w *WebService) FormParameter(name, description string) *Parameter {
return FormParameter(name, description)
}
// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
// It is initialized as required with string as its DataType.
func FormParameter(name, description string) *Parameter {
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
p.beForm()
return p
}
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
func (w *WebService) Route(builder *RouteBuilder) *WebService {
	w.routesLock.Lock()
	defer w.routesLock.Unlock()
	// copyDefaults presumably fills in produces/consumes the builder did not
	// set from the service-level defaults — confirm against RouteBuilder.
	builder.copyDefaults(w.produces, w.consumes)
	w.routes = append(w.routes, builder.Build())
	return w
}
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'.
// It returns an error when dynamic routes are not enabled. Removing a route
// that does not exist is not an error; the route table is simply unchanged.
func (w *WebService) RemoveRoute(path, method string) error {
	if !w.dynamicRoutes {
		return fmt.Errorf("dynamic routes are not enabled.")
	}
	w.routesLock.Lock()
	defer w.routesLock.Unlock()
	// Filter with append into a pre-sized slice. The previous implementation
	// allocated exactly len(routes)-1 entries up front and wrote by index,
	// which panicked with an out-of-range write whenever no route matched
	// (and make([]Route, -1) panicked outright on an empty table).
	newRoutes := make([]Route, 0, len(w.routes))
	for ix := range w.routes {
		if w.routes[ix].Method == method && w.routes[ix].Path == path {
			continue // drop the matching route
		}
		newRoutes = append(newRoutes, w.routes[ix])
	}
	w.routes = newRoutes
	return nil
}
// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
	return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
}

// Produces specifies that this WebService can produce one or more MIME types.
// Http requests must have one of these values set for the Accept header.
// Returns the receiver for chaining.
func (w *WebService) Produces(contentTypes ...string) *WebService {
	w.produces = contentTypes
	return w
}

// Consumes specifies that this WebService can consume one or more MIME types.
// Http requests must have one of these values set for the Content-Type header.
// Returns the receiver for chaining.
func (w *WebService) Consumes(accepts ...string) *WebService {
	w.consumes = accepts
	return w
}
// Routes returns the Routes associated with this WebService
// NOTE(review): the value receiver copies the whole WebService, including
// its routesLock (sync.RWMutex) — `go vet` flags copying a lock by value,
// and the RLock below is taken on the copy. Confirm whether this should use
// a pointer receiver like the mutating methods do.
func (w WebService) Routes() []Route {
	if !w.dynamicRoutes {
		return w.routes
	}
	// Make a copy of the array to prevent concurrency problems
	w.routesLock.RLock()
	defer w.routesLock.RUnlock()
	result := make([]Route, len(w.routes))
	for ix := range w.routes {
		result[ix] = w.routes[ix]
	}
	return result
}
// RootPath returns the RootPath associated with this WebService. Default "/"
func (w WebService) RootPath() string {
	return w.rootPath
}

// PathParameters returns the path parameters shared among this WebService's Routes
// (as documented via Param).
func (w WebService) PathParameters() []*Parameter {
	return w.pathParameters
}

// Filter adds a filter function to the chain of filters applicable to all its Routes
func (w *WebService) Filter(filter FilterFunction) *WebService {
	w.filters = append(w.filters, filter)
	return w
}

// Doc is used to set the documentation of this service.
func (w *WebService) Doc(plainText string) *WebService {
	w.documentation = plainText
	return w
}

// Documentation returns the plain-text documentation set via Doc.
func (w WebService) Documentation() string {
	return w.documentation
}
/*
	Convenience methods
*/

// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
	return w.Method("HEAD").Path(subPath)
}

// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
	return w.Method("GET").Path(subPath)
}

// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
	return w.Method("POST").Path(subPath)
}

// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
	return w.Method("PUT").Path(subPath)
}

// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
	return w.Method("PATCH").Path(subPath)
}

// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
	return w.Method("DELETE").Path(subPath)
}

View File

@ -0,0 +1,39 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
)
// DefaultContainer is a restful.Container that uses http.DefaultServeMux
var DefaultContainer *Container

// init wires the package-level DefaultContainer to the standard library's
// default ServeMux so package-level Add/Filter work out of the box.
func init() {
	DefaultContainer = NewContainer()
	DefaultContainer.ServeMux = http.DefaultServeMux
}
// DoNotRecover controls panic handling during request dispatch.
// If set to true then panics will not be caught to return HTTP 500.
// In that case, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications.
// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
var DoNotRecover = false
// Add registers a new WebService and adds it to the DefaultContainer.
func Add(service *WebService) {
	DefaultContainer.Add(service)
}

// Filter appends a container FilterFunction to the DefaultContainer.
// These are called before dispatching a http.Request to a WebService.
func Filter(filter FilterFunction) {
	DefaultContainer.Filter(filter)
}

// RegisteredWebServices returns the collection of WebServices from the DefaultContainer
func RegisteredWebServices() []*WebService {
	return DefaultContainer.RegisteredWebServices()
}

View File

@ -0,0 +1,19 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"github.com/openshift/origin/pkg/api/extension"
)
// Convert_runtime_Object_To_runtime_RawExtension ensures an object is converted to the destination version of the conversion.
// It delegates to the generic helper in pkg/api/extension, using the global
// kapi.Scheme as the ObjectConvertor.
func Convert_runtime_Object_To_runtime_RawExtension(in *runtime.Object, out *runtime.RawExtension, s conversion.Scope) error {
	return extension.Convert_runtime_Object_To_runtime_RawExtension(kapi.Scheme, in, out, s)
}

// Convert_runtime_RawExtension_To_runtime_Object ensures an object is converted to the destination version of the conversion.
// It delegates to the generic helper in pkg/api/extension, using the global
// kapi.Scheme as the ObjectConvertor.
func Convert_runtime_RawExtension_To_runtime_Object(in *runtime.RawExtension, out *runtime.Object, s conversion.Scope) error {
	return extension.Convert_runtime_RawExtension_To_runtime_Object(kapi.Scheme, in, out, s)
}

6
vendor/github.com/openshift/origin/pkg/api/doc.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
// Package api includes all OpenShift-specific types used to communicate
// between the various parts of the OpenShift and the Kubernetes systems.
//
// Unlike the upstream Kubernetes, API objects in OpenShift are separated
// into individual packages.
package api

View File

@ -0,0 +1,104 @@
package extension
import (
"fmt"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
)
// Convert_runtime_Object_To_runtime_RawExtension attempts to convert runtime.Objects to the appropriate target, returning an error
// if there is insufficient information on the conversion scope to determine the target version.
func Convert_runtime_Object_To_runtime_RawExtension(c runtime.ObjectConvertor, in *runtime.Object, out *runtime.RawExtension, s conversion.Scope) error {
	if *in == nil {
		return nil
	}
	obj := *in
	// Unknown/Unstructured values are passed through without version
	// conversion; only the Object field of the extension is populated.
	switch obj.(type) {
	case *runtime.Unknown, *runtime.Unstructured:
		out.Raw = nil
		out.Object = obj
		return nil
	}
	// The target version must be carried in the scope's Meta().Context as a
	// runtime.GroupVersioner; anything else is an error.
	switch t := s.Meta().Context.(type) {
	case runtime.GroupVersioner:
		converted, err := c.ConvertToVersion(obj, t)
		if err != nil {
			return err
		}
		out.Raw = nil // drop any stale serialized bytes; Object is now authoritative
		out.Object = converted
	default:
		return fmt.Errorf("unrecognized conversion context for versioning: %#v", t)
	}
	return nil
}
// Convert_runtime_RawExtension_To_runtime_Object attempts to convert an incoming object into the
// appropriate output type.
func Convert_runtime_RawExtension_To_runtime_Object(c runtime.ObjectConvertor, in *runtime.RawExtension, out *runtime.Object, s conversion.Scope) error {
	if in == nil || in.Object == nil {
		return nil
	}
	// Unknown/Unstructured values are passed through unconverted.
	switch in.Object.(type) {
	case *runtime.Unknown, *runtime.Unstructured:
		*out = in.Object
		return nil
	}
	// The target version must be carried in the scope's Meta().Context as a
	// runtime.GroupVersioner; anything else is an error.
	switch t := s.Meta().Context.(type) {
	case runtime.GroupVersioner:
		converted, err := c.ConvertToVersion(in.Object, t)
		if err != nil {
			return err
		}
		// NOTE: the input extension is mutated too — its Object is replaced
		// with the converted form.
		in.Object = converted
		*out = converted
	default:
		return fmt.Errorf("unrecognized conversion context for conversion to internal: %#v (%T)", t, t)
	}
	return nil
}
// DecodeNestedRawExtensionOrUnknown decodes ext.Raw into ext.Object using d.
// When decoding fails, the raw bytes are wrapped in a runtime.Unknown (with
// APIVersion/Kind filled in when they could be determined) so the payload is
// never silently dropped. A nil Raw or an already-set Object is a no-op.
func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) {
	if ext.Raw == nil || ext.Object != nil {
		// nothing to decode, or already decoded
		return
	}
	obj, gvk, err := d.Decode(ext.Raw, nil, nil)
	if err != nil {
		unk := &runtime.Unknown{Raw: ext.Raw}
		if runtime.IsNotRegisteredError(err) {
			// The type is unregistered; try decoding directly into Unknown to
			// at least recover the group/version/kind of the payload.
			if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil {
				unk.APIVersion = gvk.GroupVersion().String()
				unk.Kind = gvk.Kind
				ext.Object = unk
				return
			}
		}
		// TODO: record mime-type with the object
		if gvk != nil {
			unk.APIVersion = gvk.GroupVersion().String()
			unk.Kind = gvk.Kind
		}
		obj = unk
	}
	ext.Object = obj
}
// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or
// return an error. An already-serialized extension (Raw set) or an empty one
// (Object nil) is left untouched.
func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error {
	if ext.Raw != nil || ext.Object == nil {
		return nil
	}
	data, err := runtime.Encode(e, ext.Object)
	if err != nil {
		return err
	}
	ext.Raw = data
	return nil
}

54
vendor/github.com/openshift/origin/pkg/api/helpers.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package api
import (
"fmt"
"strings"
"k8s.io/kubernetes/pkg/api/validation"
)
// NameMayNotBe lists names that are rejected outright.
var NameMayNotBe = []string{".", ".."}

// NameMayNotContain lists substrings that may not appear anywhere in a name.
var NameMayNotContain = []string{"/", "%"}

// MinimalNameRequirements validates name against the minimal restrictions
// applied to every OpenShift type. It returns a single-element slice
// describing the first violation found, or nil when the name is acceptable.
// The prefix argument is accepted to satisfy the ValidateNameFunc signature
// but is not consulted.
func MinimalNameRequirements(name string, prefix bool) []string {
	for _, forbidden := range NameMayNotBe {
		if name == forbidden {
			return []string{fmt.Sprintf(`name may not be %q`, forbidden)}
		}
	}
	for _, fragment := range NameMayNotContain {
		if strings.Contains(name, fragment) {
			return []string{fmt.Sprintf(`name may not contain %q`, fragment)}
		}
	}
	return nil
}
// GetNameValidationFunc returns a name validation function that includes the standard restrictions we want for all types.
// The returned function first applies MinimalNameRequirements and only
// consults nameFunc when the minimal checks pass.
func GetNameValidationFunc(nameFunc validation.ValidateNameFunc) validation.ValidateNameFunc {
	return func(name string, prefix bool) []string {
		reasons := MinimalNameRequirements(name, prefix)
		if len(reasons) > 0 {
			return reasons
		}
		return nameFunc(name, prefix)
	}
}
// GetFieldLabelConversionFunc returns a field label conversion func, which does the following:
// * returns overrideLabels[label], value, nil if the specified label exists in the overrideLabels map
// * returns label, value, nil if the specified label exists as a key in the supportedLabels map (values in this map are unused, it is intended to be a prototypical label/value map)
// * otherwise, returns an error
func GetFieldLabelConversionFunc(supportedLabels map[string]string, overrideLabels map[string]string) func(label, value string) (string, string, error) {
	return func(label, value string) (string, string, error) {
		if mapped, ok := overrideLabels[label]; ok {
			return mapped, value, nil
		}
		if _, ok := supportedLabels[label]; ok {
			return label, value, nil
		}
		return "", "", fmt.Errorf("field label not supported: %s", label)
	}
}

19
vendor/github.com/openshift/origin/pkg/api/matcher.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
)
// ListOptionsToSelectors extracts the label and field selectors from options,
// substituting Everything() for each selector that is absent (or when options
// itself is nil).
func ListOptionsToSelectors(options *kapi.ListOptions) (labels.Selector, fields.Selector) {
	labelSel := labels.Everything()
	fieldSel := fields.Everything()
	if options != nil {
		if options.LabelSelector != nil {
			labelSel = options.LabelSelector
		}
		if options.FieldSelector != nil {
			fieldSel = options.FieldSelector
		}
	}
	return labelSel, fieldSel
}

33
vendor/github.com/openshift/origin/pkg/api/register.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package api
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
_ "github.com/openshift/origin/pkg/authorization/api"
_ "github.com/openshift/origin/pkg/build/api"
_ "github.com/openshift/origin/pkg/deploy/api"
_ "github.com/openshift/origin/pkg/image/api"
_ "github.com/openshift/origin/pkg/oauth/api"
_ "github.com/openshift/origin/pkg/project/api"
_ "github.com/openshift/origin/pkg/route/api"
_ "github.com/openshift/origin/pkg/sdn/api"
_ "github.com/openshift/origin/pkg/security/api"
_ "github.com/openshift/origin/pkg/template/api"
_ "github.com/openshift/origin/pkg/user/api"
)
// GroupName is the API group for these objects (the empty string).
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) unversioned.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource
func Resource(resource string) unversioned.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

1
vendor/github.com/openshift/origin/pkg/api/types.go generated vendored Normal file
View File

@ -0,0 +1 @@
package api

View File

@ -0,0 +1,248 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
)
// policies

// ToPolicyList converts a ClusterPolicyList into the namespaced PolicyList form.
func ToPolicyList(in *ClusterPolicyList) *PolicyList {
	ret := &PolicyList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToPolicy(&curr))
	}
	return ret
}

// ToPolicy converts a ClusterPolicy into a Policy; nil in yields nil out.
// Only ObjectMeta, LastModified and Roles are carried over.
func ToPolicy(in *ClusterPolicy) *Policy {
	if in == nil {
		return nil
	}
	ret := &Policy{}
	ret.ObjectMeta = in.ObjectMeta
	ret.LastModified = in.LastModified
	ret.Roles = ToRoleMap(in.Roles)
	return ret
}

// ToRoleMap converts a map of ClusterRoles into a map of Roles, key by key.
func ToRoleMap(in map[string]*ClusterRole) map[string]*Role {
	ret := map[string]*Role{}
	for key, role := range in {
		ret[key] = ToRole(role)
	}
	return ret
}

// ToRoleList converts a ClusterRoleList into a RoleList.
func ToRoleList(in *ClusterRoleList) *RoleList {
	ret := &RoleList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToRole(&curr))
	}
	return ret
}

// ToRole converts a ClusterRole into a Role; nil in yields nil out.
// Only ObjectMeta and Rules are carried over (Rules is shared, not copied).
func ToRole(in *ClusterRole) *Role {
	if in == nil {
		return nil
	}
	ret := &Role{}
	ret.ObjectMeta = in.ObjectMeta
	ret.Rules = in.Rules
	return ret
}
// ToClusterPolicyList converts a namespaced PolicyList into a ClusterPolicyList.
func ToClusterPolicyList(in *PolicyList) *ClusterPolicyList {
	ret := &ClusterPolicyList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToClusterPolicy(&curr))
	}
	return ret
}

// ToClusterPolicy converts a Policy into a ClusterPolicy; nil in yields nil out.
func ToClusterPolicy(in *Policy) *ClusterPolicy {
	if in == nil {
		return nil
	}
	ret := &ClusterPolicy{}
	ret.ObjectMeta = in.ObjectMeta
	ret.LastModified = in.LastModified
	ret.Roles = ToClusterRoleMap(in.Roles)
	return ret
}

// ToClusterRoleMap converts a map of Roles into a map of ClusterRoles, key by key.
func ToClusterRoleMap(in map[string]*Role) map[string]*ClusterRole {
	ret := map[string]*ClusterRole{}
	for key, role := range in {
		ret[key] = ToClusterRole(role)
	}
	return ret
}

// ToClusterRoleList converts a RoleList into a ClusterRoleList.
func ToClusterRoleList(in *RoleList) *ClusterRoleList {
	ret := &ClusterRoleList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToClusterRole(&curr))
	}
	return ret
}

// ToClusterRole converts a Role into a ClusterRole; nil in yields nil out.
// Rules is shared with the input, not copied.
func ToClusterRole(in *Role) *ClusterRole {
	if in == nil {
		return nil
	}
	ret := &ClusterRole{}
	ret.ObjectMeta = in.ObjectMeta
	ret.Rules = in.Rules
	return ret
}
// policy bindings

// ToPolicyBindingList converts a ClusterPolicyBindingList into a PolicyBindingList.
func ToPolicyBindingList(in *ClusterPolicyBindingList) *PolicyBindingList {
	ret := &PolicyBindingList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToPolicyBinding(&curr))
	}
	return ret
}

// ToPolicyBinding converts a ClusterPolicyBinding into a PolicyBinding;
// nil in yields nil out.
func ToPolicyBinding(in *ClusterPolicyBinding) *PolicyBinding {
	if in == nil {
		return nil
	}
	ret := &PolicyBinding{}
	ret.ObjectMeta = in.ObjectMeta
	ret.LastModified = in.LastModified
	ret.PolicyRef = ToPolicyRef(in.PolicyRef)
	ret.RoleBindings = ToRoleBindingMap(in.RoleBindings)
	return ret
}

// ToPolicyRef builds a reference carrying only the Name of the input;
// every other ObjectReference field is deliberately dropped.
func ToPolicyRef(in kapi.ObjectReference) kapi.ObjectReference {
	ret := kapi.ObjectReference{}
	ret.Name = in.Name
	return ret
}

// ToRoleBindingMap converts a map of ClusterRoleBindings into RoleBindings, key by key.
func ToRoleBindingMap(in map[string]*ClusterRoleBinding) map[string]*RoleBinding {
	ret := map[string]*RoleBinding{}
	for key, RoleBinding := range in {
		ret[key] = ToRoleBinding(RoleBinding)
	}
	return ret
}

// ToRoleBindingList converts a ClusterRoleBindingList into a RoleBindingList.
func ToRoleBindingList(in *ClusterRoleBindingList) *RoleBindingList {
	ret := &RoleBindingList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToRoleBinding(&curr))
	}
	return ret
}

// ToRoleBinding converts a ClusterRoleBinding into a RoleBinding; nil in yields nil out.
// Subjects is shared with the input, not copied.
func ToRoleBinding(in *ClusterRoleBinding) *RoleBinding {
	if in == nil {
		return nil
	}
	ret := &RoleBinding{}
	ret.ObjectMeta = in.ObjectMeta
	ret.Subjects = in.Subjects
	ret.RoleRef = ToRoleRef(in.RoleRef)
	return ret
}

// ToRoleRef builds a reference carrying only the Name of the input.
func ToRoleRef(in kapi.ObjectReference) kapi.ObjectReference {
	ret := kapi.ObjectReference{}
	ret.Name = in.Name
	return ret
}
// ToClusterPolicyBindingList converts a PolicyBindingList into a ClusterPolicyBindingList.
func ToClusterPolicyBindingList(in *PolicyBindingList) *ClusterPolicyBindingList {
	ret := &ClusterPolicyBindingList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToClusterPolicyBinding(&curr))
	}
	return ret
}

// ToClusterPolicyBinding converts a PolicyBinding into a ClusterPolicyBinding;
// nil in yields nil out.
func ToClusterPolicyBinding(in *PolicyBinding) *ClusterPolicyBinding {
	if in == nil {
		return nil
	}
	ret := &ClusterPolicyBinding{}
	ret.ObjectMeta = in.ObjectMeta
	ret.LastModified = in.LastModified
	ret.PolicyRef = ToClusterPolicyRef(in.PolicyRef)
	ret.RoleBindings = ToClusterRoleBindingMap(in.RoleBindings)
	return ret
}

// ToClusterPolicyRef builds a reference carrying only the Name of the input.
func ToClusterPolicyRef(in kapi.ObjectReference) kapi.ObjectReference {
	ret := kapi.ObjectReference{}
	ret.Name = in.Name
	return ret
}

// ToClusterRoleBindingMap converts a map of RoleBindings into ClusterRoleBindings, key by key.
func ToClusterRoleBindingMap(in map[string]*RoleBinding) map[string]*ClusterRoleBinding {
	ret := map[string]*ClusterRoleBinding{}
	for key, RoleBinding := range in {
		ret[key] = ToClusterRoleBinding(RoleBinding)
	}
	return ret
}

// ToClusterRoleBindingList converts a RoleBindingList into a ClusterRoleBindingList.
func ToClusterRoleBindingList(in *RoleBindingList) *ClusterRoleBindingList {
	ret := &ClusterRoleBindingList{}
	for _, curr := range in.Items {
		ret.Items = append(ret.Items, *ToClusterRoleBinding(&curr))
	}
	return ret
}

// ToClusterRoleBinding converts a RoleBinding into a ClusterRoleBinding;
// nil in yields nil out. Subjects is shared with the input, not copied.
func ToClusterRoleBinding(in *RoleBinding) *ClusterRoleBinding {
	if in == nil {
		return nil
	}
	ret := &ClusterRoleBinding{}
	ret.ObjectMeta = in.ObjectMeta
	ret.Subjects = in.Subjects
	ret.RoleRef = ToClusterRoleRef(in.RoleRef)
	return ret
}

// ToClusterRoleRef builds a reference carrying only the Name of the input.
func ToClusterRoleRef(in kapi.ObjectReference) kapi.ObjectReference {
	ret := kapi.ObjectReference{}
	ret.Name = in.Name
	return ret
}

View File

@ -0,0 +1,678 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package api
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
runtime "k8s.io/kubernetes/pkg/runtime"
sets "k8s.io/kubernetes/pkg/util/sets"
)
// init registers every generated deep copy function in this file with the
// global API scheme. (File is autogenerated by deepcopy-gen.)
func init() {
	if err := api.Scheme.AddGeneratedDeepCopyFuncs(
		DeepCopy_api_AuthorizationAttributes,
		DeepCopy_api_ClusterPolicy,
		DeepCopy_api_ClusterPolicyBinding,
		DeepCopy_api_ClusterPolicyBindingList,
		DeepCopy_api_ClusterPolicyList,
		DeepCopy_api_ClusterRole,
		DeepCopy_api_ClusterRoleBinding,
		DeepCopy_api_ClusterRoleBindingList,
		DeepCopy_api_ClusterRoleList,
		DeepCopy_api_IsPersonalSubjectAccessReview,
		DeepCopy_api_LocalResourceAccessReview,
		DeepCopy_api_LocalSubjectAccessReview,
		DeepCopy_api_Policy,
		DeepCopy_api_PolicyBinding,
		DeepCopy_api_PolicyBindingList,
		DeepCopy_api_PolicyList,
		DeepCopy_api_PolicyRule,
		DeepCopy_api_ResourceAccessReview,
		DeepCopy_api_ResourceAccessReviewResponse,
		DeepCopy_api_Role,
		DeepCopy_api_RoleBinding,
		DeepCopy_api_RoleBindingList,
		DeepCopy_api_RoleList,
		DeepCopy_api_SelfSubjectRulesReview,
		DeepCopy_api_SelfSubjectRulesReviewSpec,
		DeepCopy_api_SubjectAccessReview,
		DeepCopy_api_SubjectAccessReviewResponse,
		DeepCopy_api_SubjectRulesReviewStatus,
	); err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}
// DeepCopy_api_AuthorizationAttributes deep copies in into out (autogenerated).
// Content is cloned through the generic Cloner since its concrete type is unknown here.
func DeepCopy_api_AuthorizationAttributes(in AuthorizationAttributes, out *AuthorizationAttributes, c *conversion.Cloner) error {
	out.Namespace = in.Namespace
	out.Verb = in.Verb
	out.Group = in.Group
	out.Version = in.Version
	out.Resource = in.Resource
	out.ResourceName = in.ResourceName
	if in.Content == nil {
		out.Content = nil
	} else if newVal, err := c.DeepCopy(in.Content); err != nil {
		return err
	} else {
		out.Content = newVal.(runtime.Object)
	}
	return nil
}
// DeepCopy_api_ClusterPolicy deep copies in into out (autogenerated).
func DeepCopy_api_ClusterPolicy(in ClusterPolicy, out *ClusterPolicy, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_Time(in.LastModified, &out.LastModified, c); err != nil {
		return err
	}
	if in.Roles != nil {
		// shadow in/out with the field pair being copied (generated pattern)
		in, out := in.Roles, &out.Roles
		*out = make(map[string]*ClusterRole)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(*ClusterRole)
			}
		}
	} else {
		out.Roles = nil
	}
	return nil
}

// DeepCopy_api_ClusterPolicyBinding deep copies in into out (autogenerated).
func DeepCopy_api_ClusterPolicyBinding(in ClusterPolicyBinding, out *ClusterPolicyBinding, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_Time(in.LastModified, &out.LastModified, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectReference(in.PolicyRef, &out.PolicyRef, c); err != nil {
		return err
	}
	if in.RoleBindings != nil {
		in, out := in.RoleBindings, &out.RoleBindings
		*out = make(map[string]*ClusterRoleBinding)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(*ClusterRoleBinding)
			}
		}
	} else {
		out.RoleBindings = nil
	}
	return nil
}
// DeepCopy_api_ClusterPolicyBindingList deep copies in into out (autogenerated).
func DeepCopy_api_ClusterPolicyBindingList(in ClusterPolicyBindingList, out *ClusterPolicyBindingList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]ClusterPolicyBinding, len(in))
		for i := range in {
			if err := DeepCopy_api_ClusterPolicyBinding(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_ClusterPolicyList deep copies in into out (autogenerated).
func DeepCopy_api_ClusterPolicyList(in ClusterPolicyList, out *ClusterPolicyList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]ClusterPolicy, len(in))
		for i := range in {
			if err := DeepCopy_api_ClusterPolicy(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// DeepCopy_api_ClusterRole deep copies in into out (autogenerated).
func DeepCopy_api_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if in.Rules != nil {
		in, out := in.Rules, &out.Rules
		*out = make([]PolicyRule, len(in))
		for i := range in {
			if err := DeepCopy_api_PolicyRule(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Rules = nil
	}
	return nil
}

// DeepCopy_api_ClusterRoleBinding deep copies in into out (autogenerated).
func DeepCopy_api_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if in.Subjects != nil {
		in, out := in.Subjects, &out.Subjects
		*out = make([]api.ObjectReference, len(in))
		for i := range in {
			if err := api.DeepCopy_api_ObjectReference(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Subjects = nil
	}
	if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil {
		return err
	}
	return nil
}
// DeepCopy_api_ClusterRoleBindingList deep copies in into out (autogenerated).
func DeepCopy_api_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]ClusterRoleBinding, len(in))
		for i := range in {
			if err := DeepCopy_api_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_ClusterRoleList deep copies in into out (autogenerated).
func DeepCopy_api_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]ClusterRole, len(in))
		for i := range in {
			if err := DeepCopy_api_ClusterRole(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// DeepCopy_api_IsPersonalSubjectAccessReview deep copies in into out (autogenerated);
// the type only carries TypeMeta.
func DeepCopy_api_IsPersonalSubjectAccessReview(in IsPersonalSubjectAccessReview, out *IsPersonalSubjectAccessReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_api_LocalResourceAccessReview deep copies in into out (autogenerated).
func DeepCopy_api_LocalResourceAccessReview(in LocalResourceAccessReview, out *LocalResourceAccessReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_AuthorizationAttributes(in.Action, &out.Action, c); err != nil {
		return err
	}
	return nil
}
// DeepCopy_api_LocalSubjectAccessReview deep copies in into out (autogenerated).
func DeepCopy_api_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_AuthorizationAttributes(in.Action, &out.Action, c); err != nil {
		return err
	}
	out.User = in.User
	if in.Groups != nil {
		in, out := in.Groups, &out.Groups
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Groups = nil
	}
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	return nil
}
// DeepCopy_api_Policy deep copies in into out (autogenerated).
func DeepCopy_api_Policy(in Policy, out *Policy, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_Time(in.LastModified, &out.LastModified, c); err != nil {
		return err
	}
	if in.Roles != nil {
		in, out := in.Roles, &out.Roles
		*out = make(map[string]*Role)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(*Role)
			}
		}
	} else {
		out.Roles = nil
	}
	return nil
}

// DeepCopy_api_PolicyBinding deep copies in into out (autogenerated).
func DeepCopy_api_PolicyBinding(in PolicyBinding, out *PolicyBinding, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_Time(in.LastModified, &out.LastModified, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectReference(in.PolicyRef, &out.PolicyRef, c); err != nil {
		return err
	}
	if in.RoleBindings != nil {
		in, out := in.RoleBindings, &out.RoleBindings
		*out = make(map[string]*RoleBinding)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(*RoleBinding)
			}
		}
	} else {
		out.RoleBindings = nil
	}
	return nil
}
// DeepCopy_api_PolicyBindingList deep copies in into out (autogenerated).
func DeepCopy_api_PolicyBindingList(in PolicyBindingList, out *PolicyBindingList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]PolicyBinding, len(in))
		for i := range in {
			if err := DeepCopy_api_PolicyBinding(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_PolicyList deep copies in into out (autogenerated).
func DeepCopy_api_PolicyList(in PolicyList, out *PolicyList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]Policy, len(in))
		for i := range in {
			if err := DeepCopy_api_Policy(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// DeepCopy_api_PolicyRule deep-copies in into out. Generated deepcopy code.
// The sets.String fields (Verbs, Resources, ResourceNames, NonResourceURLs)
// are rebuilt key by key through the generic cloner; AttributeRestrictions is
// an opaque runtime.Object and is cloned via c as well. Nil inputs reset the
// corresponding output field to nil.
func DeepCopy_api_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error {
	if in.Verbs != nil {
		in, out := in.Verbs, &out.Verbs
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Verbs = nil
	}
	if in.AttributeRestrictions == nil {
		out.AttributeRestrictions = nil
	} else if newVal, err := c.DeepCopy(in.AttributeRestrictions); err != nil {
		return err
	} else {
		out.AttributeRestrictions = newVal.(runtime.Object)
	}
	if in.APIGroups != nil {
		in, out := in.APIGroups, &out.APIGroups
		// Plain string slice: a shallow element copy is a full deep copy.
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.APIGroups = nil
	}
	if in.Resources != nil {
		in, out := in.Resources, &out.Resources
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Resources = nil
	}
	if in.ResourceNames != nil {
		in, out := in.ResourceNames, &out.ResourceNames
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.ResourceNames = nil
	}
	if in.NonResourceURLs != nil {
		in, out := in.NonResourceURLs, &out.NonResourceURLs
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.NonResourceURLs = nil
	}
	return nil
}
// DeepCopy_api_ResourceAccessReview deep-copies in into out. Generated
// deepcopy code; delegates to the TypeMeta and AuthorizationAttributes copiers.
func DeepCopy_api_ResourceAccessReview(in ResourceAccessReview, out *ResourceAccessReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_AuthorizationAttributes(in.Action, &out.Action, c); err != nil {
		return err
	}
	return nil
}
// DeepCopy_api_ResourceAccessReviewResponse deep-copies in into out. Generated
// deepcopy code; Users and Groups sets are rebuilt key by key, scalar fields
// are assigned directly.
func DeepCopy_api_ResourceAccessReviewResponse(in ResourceAccessReviewResponse, out *ResourceAccessReviewResponse, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	out.Namespace = in.Namespace
	if in.Users != nil {
		in, out := in.Users, &out.Users
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Users = nil
	}
	if in.Groups != nil {
		in, out := in.Groups, &out.Groups
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Groups = nil
	}
	out.EvaluationError = in.EvaluationError
	return nil
}
// DeepCopy_api_Role deep-copies in into out. Generated deepcopy code; Rules
// are copied element-wise via DeepCopy_api_PolicyRule.
func DeepCopy_api_Role(in Role, out *Role, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if in.Rules != nil {
		in, out := in.Rules, &out.Rules
		*out = make([]PolicyRule, len(in))
		for i := range in {
			if err := DeepCopy_api_PolicyRule(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Rules = nil
	}
	return nil
}
// DeepCopy_api_RoleBinding deep-copies in into out. Generated deepcopy code;
// Subjects are copied element-wise and RoleRef via the ObjectReference copier.
func DeepCopy_api_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if in.Subjects != nil {
		in, out := in.Subjects, &out.Subjects
		*out = make([]api.ObjectReference, len(in))
		for i := range in {
			if err := api.DeepCopy_api_ObjectReference(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Subjects = nil
	}
	if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil {
		return err
	}
	return nil
}
// DeepCopy_api_RoleBindingList deep-copies in into out. Generated deepcopy
// code; Items are copied element-wise via DeepCopy_api_RoleBinding.
func DeepCopy_api_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]RoleBinding, len(in))
		for i := range in {
			if err := DeepCopy_api_RoleBinding(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// DeepCopy_api_RoleList deep-copies in into out. Generated deepcopy code;
// Items are copied element-wise via DeepCopy_api_Role.
func DeepCopy_api_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]Role, len(in))
		for i := range in {
			if err := DeepCopy_api_Role(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
// DeepCopy_api_SelfSubjectRulesReview deep-copies in into out. Generated
// deepcopy code; delegates to the Spec and Status copiers.
func DeepCopy_api_SelfSubjectRulesReview(in SelfSubjectRulesReview, out *SelfSubjectRulesReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_SelfSubjectRulesReviewSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := DeepCopy_api_SubjectRulesReviewStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}
// DeepCopy_api_SelfSubjectRulesReviewSpec deep-copies in into out. Generated
// deepcopy code; nil vs. empty Scopes is preserved because the two are
// semantically distinct for this type.
func DeepCopy_api_SelfSubjectRulesReviewSpec(in SelfSubjectRulesReviewSpec, out *SelfSubjectRulesReviewSpec, c *conversion.Cloner) error {
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	return nil
}
// DeepCopy_api_SubjectAccessReview deep-copies in into out. Generated deepcopy
// code; Groups set is rebuilt key by key, Scopes slice is copied shallowly
// (string elements need no deeper copy).
func DeepCopy_api_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_AuthorizationAttributes(in.Action, &out.Action, c); err != nil {
		return err
	}
	out.User = in.User
	if in.Groups != nil {
		in, out := in.Groups, &out.Groups
		*out = make(sets.String)
		for key, val := range in {
			if newVal, err := c.DeepCopy(val); err != nil {
				return err
			} else {
				(*out)[key] = newVal.(sets.Empty)
			}
		}
	} else {
		out.Groups = nil
	}
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	return nil
}
// DeepCopy_api_SubjectAccessReviewResponse deep-copies in into out. Generated
// deepcopy code; all payload fields are scalars and are assigned directly.
func DeepCopy_api_SubjectAccessReviewResponse(in SubjectAccessReviewResponse, out *SubjectAccessReviewResponse, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	out.Namespace = in.Namespace
	out.Allowed = in.Allowed
	out.Reason = in.Reason
	return nil
}
// DeepCopy_api_SubjectRulesReviewStatus deep-copies in into out. Generated
// deepcopy code; Rules are copied element-wise via DeepCopy_api_PolicyRule.
func DeepCopy_api_SubjectRulesReviewStatus(in SubjectRulesReviewStatus, out *SubjectRulesReviewStatus, c *conversion.Cloner) error {
	if in.Rules != nil {
		in, out := in.Rules, &out.Rules
		*out = make([]PolicyRule, len(in))
		for i := range in {
			if err := DeepCopy_api_PolicyRule(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Rules = nil
	}
	out.EvaluationError = in.EvaluationError
	return nil
}

View File

@ -0,0 +1,91 @@
package api
import (
"k8s.io/kubernetes/pkg/util/sets"
)
// NEVER TOUCH ANYTHING IN THIS FILE!
// Resource-group names. Each name carries the resourceGroupPrefix marker so
// NormalizeResources can tell a group reference from a plain resource name.
// Changing any of these values changes the meaning of existing policies.
const (
	// resourceGroupPrefix is the prefix for indicating that a resource entry is actually a group of resources. The groups are defined in code and indicate resources that are commonly permissioned together
	resourceGroupPrefix = "resourcegroup:"
	buildGroupName      = resourceGroupPrefix + "builds"
	deploymentGroupName = resourceGroupPrefix + "deployments"
	imageGroupName      = resourceGroupPrefix + "images"
	oauthGroupName      = resourceGroupPrefix + "oauth"
	userGroupName       = resourceGroupPrefix + "users"
	templateGroupName   = resourceGroupPrefix + "templates"
	sdnGroupName        = resourceGroupPrefix + "sdn"

	// policyOwnerGroupName includes the physical resources behind the permissionGrantingGroupName. Unless these physical objects are created first, users with privileges to permissionGrantingGroupName will
	// only be able to bind to global roles
	policyOwnerGroupName = resourceGroupPrefix + "policy"

	// permissionGrantingGroupName includes resources that are necessary to maintain authorization roles and bindings. By itself, this group is insufficient to create anything except for bindings
	// to master roles. If a local Policy already exists, then privileges to this group will allow for modification of local roles.
	permissionGrantingGroupName = resourceGroupPrefix + "granter"

	// openshiftExposedGroupName includes resources that are commonly viewed and modified by end users of the system. It does not include any sensitive resources that control authentication or authorization
	openshiftExposedGroupName = resourceGroupPrefix + "exposedopenshift"
	openshiftAllGroupName     = resourceGroupPrefix + "allopenshift"
	openshiftStatusGroupName  = resourceGroupPrefix + "allopenshift-status"

	quotaGroupName = resourceGroupPrefix + "quota"

	// kubeInternalsGroupName includes those resources that should reasonably be viewable to end users, but that most users should probably not modify. Kubernetes herself will maintain these resources
	kubeInternalsGroupName = resourceGroupPrefix + "privatekube"
	// kubeExposedGroupName includes resources that are commonly viewed and modified by end users of the system.
	kubeExposedGroupName = resourceGroupPrefix + "exposedkube"
	kubeAllGroupName     = resourceGroupPrefix + "allkube"
	kubeStatusGroupName  = resourceGroupPrefix + "allkube-status"

	// nonescalatingResourcesGroupName contains all resources that can be viewed without exposing the risk of using view rights to locate a secret to escalate privileges. For example, view
	// rights on secrets could be used locate a secret that happened to be serviceaccount token that has more privileges
	nonescalatingResourcesGroupName         = resourceGroupPrefix + "non-escalating"
	kubeNonEscalatingViewableGroupName      = resourceGroupPrefix + "kube-non-escalating"
	openshiftNonEscalatingViewableGroupName = resourceGroupPrefix + "openshift-non-escalating"

	// escalatingResourcesGroupName contains all resources that can be used to escalate privileges when simply viewed
	escalatingResourcesGroupName         = resourceGroupPrefix + "escalating"
	kubeEscalatingViewableGroupName      = resourceGroupPrefix + "kube-escalating"
	openshiftEscalatingViewableGroupName = resourceGroupPrefix + "openshift-escalating"
)
var (
	// groupsToResources maps each resource-group name to its member resources.
	// Entries may themselves be group names (prefixed with resourceGroupPrefix);
	// NormalizeResources expands them recursively. The two *NonEscalating*
	// entries are filled in by init() below, not here.
	groupsToResources = map[string][]string{
		buildGroupName:      {"builds", "buildconfigs", "buildlogs", "buildconfigs/instantiate", "buildconfigs/instantiatebinary", "builds/log", "builds/clone", "buildconfigs/webhooks"},
		imageGroupName:      {"imagestreams", "imagestreammappings", "imagestreamtags", "imagestreamimages", "imagestreamimports"},
		deploymentGroupName: {"deploymentconfigs", "generatedeploymentconfigs", "deploymentconfigrollbacks", "deploymentconfigs/log", "deploymentconfigs/scale"},
		sdnGroupName:        {"clusternetworks", "hostsubnets", "netnamespaces"},
		templateGroupName:   {"templates", "templateconfigs", "processedtemplates"},
		userGroupName:       {"identities", "users", "useridentitymappings", "groups"},
		oauthGroupName:      {"oauthauthorizetokens", "oauthaccesstokens", "oauthclients", "oauthclientauthorizations"},
		policyOwnerGroupName: {"policies", "policybindings"},

		// RAR and SAR are in this list to support backwards compatibility with clients that expect access to those resource in a namespace scope and a cluster scope.
		// TODO remove once we have eliminated the namespace scoped resource.
		permissionGrantingGroupName: {"roles", "rolebindings", "resourceaccessreviews" /* cluster scoped*/, "subjectaccessreviews" /* cluster scoped*/, "localresourceaccessreviews", "localsubjectaccessreviews"},
		openshiftExposedGroupName:   {buildGroupName, imageGroupName, deploymentGroupName, templateGroupName, "routes"},
		openshiftAllGroupName: {openshiftExposedGroupName, userGroupName, oauthGroupName, policyOwnerGroupName, sdnGroupName, permissionGrantingGroupName, openshiftStatusGroupName, "projects",
			"clusterroles", "clusterrolebindings", "clusterpolicies", "clusterpolicybindings", "images" /* cluster scoped*/, "projectrequests", "builds/details", "imagestreams/secrets",
			"selfsubjectrulesreviews"},
		openshiftStatusGroupName: {"imagestreams/status", "routes/status", "deploymentconfigs/status"},

		quotaGroupName: {"limitranges", "resourcequotas", "resourcequotausages"},

		kubeExposedGroupName:   {"pods", "replicationcontrollers", "serviceaccounts", "services", "endpoints", "persistentvolumeclaims", "pods/log", "configmaps"},
		kubeInternalsGroupName: {"minions", "nodes", "bindings", "events", "namespaces", "persistentvolumes", "securitycontextconstraints"},
		kubeAllGroupName:       {kubeInternalsGroupName, kubeExposedGroupName, quotaGroupName},
		kubeStatusGroupName:    {"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status"},

		openshiftEscalatingViewableGroupName: {"oauthauthorizetokens", "oauthaccesstokens", "imagestreams/secrets"},
		kubeEscalatingViewableGroupName:      {"secrets"},
		escalatingResourcesGroupName:         {openshiftEscalatingViewableGroupName, kubeEscalatingViewableGroupName},
		nonescalatingResourcesGroupName:      {openshiftNonEscalatingViewableGroupName, kubeNonEscalatingViewableGroupName},
	}
)
// init derives the two "non-escalating" groups as the set difference
// all-resources minus escalating-resources, for OpenShift and Kubernetes
// respectively. It must run after groupsToResources is initialized (Go
// guarantees package-level vars are initialized before init functions).
func init() {
	// set the non-escalating groups
	groupsToResources[openshiftNonEscalatingViewableGroupName] = NormalizeResources(sets.NewString(groupsToResources[openshiftAllGroupName]...)).
		Difference(NormalizeResources(sets.NewString(groupsToResources[openshiftEscalatingViewableGroupName]...))).List()

	groupsToResources[kubeNonEscalatingViewableGroupName] = NormalizeResources(sets.NewString(groupsToResources[kubeAllGroupName]...)).
		Difference(NormalizeResources(sets.NewString(groupsToResources[kubeEscalatingViewableGroupName]...))).List()
}

View File

@ -0,0 +1,56 @@
package api
import "k8s.io/kubernetes/pkg/fields"
// ClusterPolicyToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// ClusterPolicyToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func ClusterPolicyToSelectableFields(policy *ClusterPolicy) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = policy.Name
	return selectable
}
// ClusterPolicyBindingToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// ClusterPolicyBindingToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func ClusterPolicyBindingToSelectableFields(policyBinding *ClusterPolicyBinding) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = policyBinding.Name
	return selectable
}
// PolicyToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// PolicyToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func PolicyToSelectableFields(policy *Policy) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = policy.Name
	selectable["metadata.namespace"] = policy.Namespace
	return selectable
}
// PolicyBindingToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// PolicyBindingToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func PolicyBindingToSelectableFields(policyBinding *PolicyBinding) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = policyBinding.Name
	selectable["metadata.namespace"] = policyBinding.Namespace
	// PolicyBindings are additionally selectable by the namespace of the
	// Policy they reference.
	selectable["policyRef.namespace"] = policyBinding.PolicyRef.Namespace
	return selectable
}
// RoleToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// RoleToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func RoleToSelectableFields(role *Role) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = role.Name
	selectable["metadata.namespace"] = role.Namespace
	return selectable
}
// RoleBindingToSelectableFields returns a label set that represents the object
// changes to the returned keys require registering conversions for existing versions using Scheme.AddFieldLabelConversionFunc
// RoleBindingToSelectableFields returns a label set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func RoleBindingToSelectableFields(roleBinding *RoleBinding) fields.Set {
	selectable := fields.Set{}
	selectable["metadata.name"] = roleBinding.Name
	selectable["metadata.namespace"] = roleBinding.Namespace
	return selectable
}

View File

@ -0,0 +1,337 @@
package api
import (
"fmt"
"sort"
"strings"
"unicode"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/auth/user"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/pkg/util/sets"
// uservalidation "github.com/openshift/origin/pkg/user/api/validation"
)
// NormalizeResources expands all resource groups and forces all resources to lower case.
// If the rawResources are already normalized, it returns the original set to avoid the
// allocation and GC cost, since this is hit multiple times for every REST call.
// That means you should NEVER MODIFY THE RESULT of this call.
// NormalizeResources expands all resource groups and forces all resources to lower case.
// If the rawResources are already normalized, it returns the original set to avoid the
// allocation and GC cost, since this is hit multiple times for every REST call.
// That means you should NEVER MODIFY THE RESULT of this call.
func NormalizeResources(rawResources sets.String) sets.String {
	// Fast path: expansion is only needed if some entry is a group reference
	// or contains upper-case letters. Most callers pass pre-normalized sets.
	normalizationNeeded := false
	for resource := range rawResources {
		if needsNormalizing(resource) {
			normalizationNeeded = true
			break
		}
	}
	if !normalizationNeeded {
		return rawResources
	}

	// Breadth-first expansion: group references push their members onto the
	// work queue; plain resources are lower-cased into the result. The seen
	// set guards against cycles between groups.
	expanded := sets.String{}
	queue := rawResources.List()
	seen := sets.String{}
	for idx := 0; idx < len(queue); idx++ {
		resource := queue[idx]
		if seen.Has(resource) {
			continue
		}
		seen.Insert(resource)

		if !strings.HasPrefix(resource, resourceGroupPrefix) {
			expanded.Insert(strings.ToLower(resource))
			continue
		}
		if members, ok := groupsToResources[resource]; ok {
			queue = append(queue, members...)
		}
	}
	return expanded
}
// needsNormalizing reports whether in requires normalization: either it is a
// resource-group reference that must be expanded, or it contains upper-case
// characters that must be folded to lower case.
func needsNormalizing(in string) bool {
	if strings.HasPrefix(in, resourceGroupPrefix) {
		return true
	}
	return strings.IndexFunc(in, unicode.IsUpper) >= 0
}
// String implements fmt.Stringer by prefixing the compact representation
// with the type name.
func (r PolicyRule) String() string {
	return fmt.Sprintf("PolicyRule%s", r.CompactString())
}
// CompactString exposes a compact string representation for use in escalation error messages
func (r PolicyRule) CompactString() string {
formatStringParts := []string{}
formatArgs := []interface{}{}
if len(r.Verbs) > 0 {
formatStringParts = append(formatStringParts, "Verbs:%q")
formatArgs = append(formatArgs, r.Verbs.List())
}
if len(r.APIGroups) > 0 {
formatStringParts = append(formatStringParts, "APIGroups:%q")
formatArgs = append(formatArgs, r.APIGroups)
}
if len(r.Resources) > 0 {
formatStringParts = append(formatStringParts, "Resources:%q")
formatArgs = append(formatArgs, r.Resources.List())
}
if len(r.ResourceNames) > 0 {
formatStringParts = append(formatStringParts, "ResourceNames:%q")
formatArgs = append(formatArgs, r.ResourceNames.List())
}
if r.AttributeRestrictions != nil {
formatStringParts = append(formatStringParts, "Restrictions:%q")
formatArgs = append(formatArgs, r.AttributeRestrictions)
}
if len(r.NonResourceURLs) > 0 {
formatStringParts = append(formatStringParts, "NonResourceURLs:%q")
formatArgs = append(formatArgs, r.NonResourceURLs.List())
}
formatString := "{" + strings.Join(formatStringParts, ", ") + "}"
return fmt.Sprintf(formatString, formatArgs...)
}
// getRoleBindingValues collects the values of roleBindingMap into a slice.
// Map iteration order is random, so the result is unsorted; callers that need
// a stable order must sort it (see SortRoleBindings).
func getRoleBindingValues(roleBindingMap map[string]*RoleBinding) []*RoleBinding {
	// Pre-size to the final length to avoid repeated append growth.
	ret := make([]*RoleBinding, 0, len(roleBindingMap))
	for _, currBinding := range roleBindingMap {
		ret = append(ret, currBinding)
	}
	return ret
}
// SortRoleBindings returns the values of roleBindingMap sorted by name,
// descending when reverse is true and ascending otherwise.
func SortRoleBindings(roleBindingMap map[string]*RoleBinding, reverse bool) []*RoleBinding {
	bindings := getRoleBindingValues(roleBindingMap)
	order := sort.Interface(RoleBindingSorter(bindings))
	if reverse {
		order = sort.Reverse(order)
	}
	sort.Sort(order)
	return bindings
}
// PolicyBindingSorter implements sort.Interface over PolicyBindings,
// ordering them by name.
type PolicyBindingSorter []PolicyBinding

func (s PolicyBindingSorter) Len() int           { return len(s) }
func (s PolicyBindingSorter) Less(i, j int) bool { return s[i].Name < s[j].Name }
func (s PolicyBindingSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
// RoleBindingSorter implements sort.Interface over *RoleBinding values,
// ordering them by name.
type RoleBindingSorter []*RoleBinding

func (s RoleBindingSorter) Len() int           { return len(s) }
func (s RoleBindingSorter) Less(i, j int) bool { return s[i].Name < s[j].Name }
func (s RoleBindingSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func GetPolicyBindingName(policyRefNamespace string) string {
return fmt.Sprintf("%s:%s", policyRefNamespace, PolicyName)
}
var ClusterPolicyBindingName = GetPolicyBindingName("")
// BuildSubjects converts user and group name lists into ObjectReferences.
// Usernames that parse as service-account usernames become ServiceAccount
// references; otherwise the validator decides between User/SystemUser
// (resp. Group/SystemGroup) kinds — names failing validation are treated
// as system identities.
func BuildSubjects(users, groups []string, userNameValidator, groupNameValidator validation.ValidateNameFunc) []kapi.ObjectReference {
	subjects := []kapi.ObjectReference{}

	for _, username := range users {
		// Service-account usernames embed their namespace; SplitUsername only
		// succeeds for that form.
		if saNamespace, saName, err := serviceaccount.SplitUsername(username); err == nil {
			subjects = append(subjects, kapi.ObjectReference{Kind: ServiceAccountKind, Namespace: saNamespace, Name: saName})
			continue
		}

		kind := UserKind
		if len(userNameValidator(username, false)) != 0 {
			kind = SystemUserKind
		}
		subjects = append(subjects, kapi.ObjectReference{Kind: kind, Name: username})
	}

	for _, groupName := range groups {
		kind := GroupKind
		if len(groupNameValidator(groupName, false)) != 0 {
			kind = SystemGroupKind
		}
		subjects = append(subjects, kapi.ObjectReference{Kind: kind, Name: groupName})
	}

	return subjects
}
// StringSubjectsFor returns users and groups for comparison against user.Info. currentNamespace is used to
// to create usernames for service accounts where namespace=="".
// StringSubjectsFor returns users and groups for comparison against user.Info.
// currentNamespace is used to create usernames for service accounts where
// namespace=="". Subjects of unknown kinds are dropped.
func StringSubjectsFor(currentNamespace string, subjects []kapi.ObjectReference) ([]string, []string) {
	// these MUST be nil to indicate empty
	var users, groups []string

	for _, subject := range subjects {
		switch subject.Kind {
		case ServiceAccountKind:
			// Fall back to the current namespace when the subject has none;
			// skip entirely if neither provides one.
			ns := subject.Namespace
			if len(ns) == 0 {
				ns = currentNamespace
			}
			if len(ns) > 0 {
				users = append(users, serviceaccount.MakeUsername(ns, subject.Name))
			}

		case UserKind, SystemUserKind:
			users = append(users, subject.Name)

		case GroupKind, SystemGroupKind:
			groups = append(groups, subject.Name)
		}
	}

	return users, groups
}
// SubjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. currentNamespace is used to
// hide the subject.Namespace for ServiceAccounts in the currentNamespace
// SubjectsStrings returns users, groups, serviceaccounts, unknown for display
// purposes. currentNamespace is used to hide the subject.Namespace for
// ServiceAccounts in the currentNamespace.
func SubjectsStrings(currentNamespace string, subjects []kapi.ObjectReference) ([]string, []string, []string, []string) {
	users := []string{}
	groups := []string{}
	sas := []string{}
	others := []string{}

	for _, subject := range subjects {
		switch subject.Kind {
		case ServiceAccountKind:
			// Qualify with the namespace only when it differs from the
			// namespace the caller is displaying.
			name := subject.Name
			if len(subject.Namespace) > 0 && currentNamespace != subject.Namespace {
				name = subject.Namespace + "/" + subject.Name
			}
			sas = append(sas, name)

		case UserKind, SystemUserKind:
			users = append(users, subject.Name)

		case GroupKind, SystemGroupKind:
			groups = append(groups, subject.Name)

		default:
			others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name))
		}
	}

	return users, groups, sas, others
}
// AddUserToSAR fills sar with the identity of user — name, groups, and a
// defensive copy of the user's scopes extra field — and returns sar for
// chaining.
func AddUserToSAR(user user.Info, sar *SubjectAccessReview) *SubjectAccessReview {
	origScopes := user.GetExtra()[ScopesKey]
	// Copy so later mutation of the user's extra map cannot affect the review.
	// (make with length only; the explicit capacity argument was redundant.)
	scopes := make([]string, len(origScopes))
	copy(scopes, origScopes)

	sar.User = user.GetName()
	sar.Groups = sets.NewString(user.GetGroups()...)
	sar.Scopes = scopes
	return sar
}
// AddUserToLSAR fills lsar with the identity of user — name, groups, and a
// defensive copy of the user's scopes extra field — and returns lsar for
// chaining.
func AddUserToLSAR(user user.Info, lsar *LocalSubjectAccessReview) *LocalSubjectAccessReview {
	origScopes := user.GetExtra()[ScopesKey]
	// Copy so later mutation of the user's extra map cannot affect the review.
	// (make with length only; the explicit capacity argument was redundant.)
	scopes := make([]string, len(origScopes))
	copy(scopes, origScopes)

	lsar.User = user.GetName()
	lsar.Groups = sets.NewString(user.GetGroups()...)
	lsar.Scopes = scopes
	return lsar
}
// +gencopy=false
// PolicyRuleBuilder let's us attach methods. A no-no for API types.
// It accumulates a PolicyRule via chained Groups/Resources/Names calls and
// validates it in Rule/RuleOrDie.
type PolicyRuleBuilder struct {
	PolicyRule PolicyRule
}
// NewRule starts a PolicyRuleBuilder with the given verbs; Resources and
// ResourceNames begin as empty (non-nil) sets so the chained mutators can
// insert into them.
func NewRule(verbs ...string) *PolicyRuleBuilder {
	rule := PolicyRule{
		Verbs:         sets.NewString(verbs...),
		Resources:     sets.String{},
		ResourceNames: sets.String{},
	}
	return &PolicyRuleBuilder{PolicyRule: rule}
}
// Groups appends API groups to the rule under construction and returns the
// builder for chaining.
func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
	r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...)
	return r
}
// Resources adds resource names to the rule's Resources set and returns the
// builder for chaining.
func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
	r.PolicyRule.Resources.Insert(resources...)
	return r
}
// Names adds entries to the rule's ResourceNames whitelist and returns the
// builder for chaining.
func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
	r.PolicyRule.ResourceNames.Insert(names...)
	return r
}
// RuleOrDie is the panic-on-error variant of Rule, for use in package
// initialization of known-good rules.
func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule {
	ret, err := r.Rule()
	if err != nil {
		panic(err)
	}
	return ret
}
// Rule validates and returns the accumulated PolicyRule. A rule must have
// verbs and must be either a non-resource rule (NonResourceURLs only) or a
// resource rule (Resources plus APIGroups); mixing the two, or having
// neither, is an error. The case order matters: NonResourceURLs wins when
// both are set, producing the mixed-rule error.
func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) {
	if len(r.PolicyRule.Verbs) == 0 {
		return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
	}

	switch {
	case len(r.PolicyRule.NonResourceURLs) > 0:
		if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
			return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
		}
	case len(r.PolicyRule.Resources) > 0:
		if len(r.PolicyRule.NonResourceURLs) != 0 {
			return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule)
		}
		if len(r.PolicyRule.APIGroups) == 0 {
			return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
		}
	default:
		return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
	}

	return r.PolicyRule, nil
}
// SortableRuleSlice implements sort.Interface over PolicyRules, ordering them
// by their String() representation.
type SortableRuleSlice []PolicyRule

func (s SortableRuleSlice) Len() int      { return len(s) }
func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SortableRuleSlice) Less(i, j int) bool {
	// Built-in string comparison instead of strings.Compare: same ordering,
	// clearer and faster (strings.Compare docs recommend the operators).
	return s[i].String() < s[j].String()
}

View File

@ -0,0 +1,58 @@
package api
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
// GroupName is the API group for these types; empty means the legacy
// (core/origin) group.
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) unversioned.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and returns back a Group qualified GroupResource
// Resource takes an unqualified resource and returns back a Group qualified GroupResource.
func Resource(resource string) unversioned.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
// AddToScheme registers this package's types with the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	// Add the API to Scheme.
	addKnownTypes(scheme)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) {
scheme.AddKnownTypes(SchemeGroupVersion,
&Role{},
&RoleBinding{},
&Policy{},
&PolicyBinding{},
&PolicyList{},
&PolicyBindingList{},
&RoleBindingList{},
&RoleList{},
&SelfSubjectRulesReview{},
&ResourceAccessReview{},
&SubjectAccessReview{},
&LocalResourceAccessReview{},
&LocalSubjectAccessReview{},
&ResourceAccessReviewResponse{},
&SubjectAccessReviewResponse{},
&IsPersonalSubjectAccessReview{},
&ClusterRole{},
&ClusterRoleBinding{},
&ClusterPolicy{},
&ClusterPolicyBinding{},
&ClusterPolicyList{},
&ClusterPolicyBindingList{},
&ClusterRoleBindingList{},
&ClusterRoleList{},
)
}

View File

@ -0,0 +1,15 @@
package api
// Synthetic authorization endpoints
const (
DockerBuildResource = "builds/docker"
SourceBuildResource = "builds/source"
CustomBuildResource = "builds/custom"
JenkinsPipelineBuildResource = "builds/jenkinspipeline"
NodeMetricsResource = "nodes/metrics"
NodeStatsResource = "nodes/stats"
NodeLogResource = "nodes/log"
RestrictedEndpointsResource = "endpoints/restricted"
)

View File

@ -0,0 +1,398 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
kruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
)
// Authorization is calculated against
// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match
// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match
// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match
// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match
// 5. deny by default
const (
	// PolicyName is the name of Policy
	PolicyName = "default"

	// Wildcards accepted by PolicyRule fields.
	APIGroupAll    = "*"
	ResourceAll    = "*"
	VerbAll        = "*"
	NonResourceAll = "*"

	// ScopesKey is the user.Info extra-fields key under which scopes are stored.
	ScopesKey           = "authorization.openshift.io/scopes"
	ScopesAllNamespaces = "*"

	// Subject kinds used in role binding ObjectReferences. The System* kinds
	// are assigned to names that fail normal name validation.
	UserKind           = "User"
	GroupKind          = "Group"
	ServiceAccountKind = "ServiceAccount"
	SystemUserKind     = "SystemUser"
	SystemGroupKind    = "SystemGroup"

	// Resource names corresponding to the subject kinds above.
	UserResource           = "users"
	GroupResource          = "groups"
	ServiceAccountResource = "serviceaccounts"
	SystemUserResource     = "systemusers"
	SystemGroupResource    = "systemgroups"
)
// DiscoveryRule is a rule that allows a client to discover the API resources available on this server
var DiscoveryRule = PolicyRule{
	Verbs: sets.NewString("get"),
	NonResourceURLs: sets.NewString(
		// Server version checking
		"/version", "/version/*",

		// API discovery/negotiation
		"/api", "/api/*",
		"/apis", "/apis/*",
		"/oapi", "/oapi/*",
		// NOTE(review): "/osapi/" lacks the trailing "*" that the sibling
		// entries have — looks deliberate for legacy clients, but confirm.
		"/osapi", "/osapi/", // these cannot be removed until we can drop support for pre 3.1 clients
	),
}
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
	// Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
	Verbs sets.String
	// AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports.
	// If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
	AttributeRestrictions kruntime.Object
	// APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed.
	// That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request
	// will be allowed
	APIGroups []string
	// Resources is a list of resources this rule applies to. ResourceAll represents all resources.
	Resources sets.String
	// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
	ResourceNames sets.String
	// NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
	// If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match.
	NonResourceURLs sets.String
}
// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed.
// It carries no data beyond its type identity.
type IsPersonalSubjectAccessReview struct {
	unversioned.TypeMeta
}
// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.
type Role struct {
	unversioned.TypeMeta
	// Standard object's metadata.
	kapi.ObjectMeta

	// Rules holds all the PolicyRules for this Role
	Rules []PolicyRule
}
// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace.
// It adds who information via Users and Groups and namespace information by which namespace it exists in. RoleBindings in a given
// namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
type RoleBinding struct {
	unversioned.TypeMeta
	// Standard object's metadata.
	kapi.ObjectMeta

	// Subjects hold object references of to authorize with this rule
	Subjects []kapi.ObjectReference

	// RoleRef can only reference the current namespace and the global namespace
	// If the RoleRef cannot be resolved, the Authorizer must return an error.
	// Since Policy is a singleton, this is sufficient knowledge to locate a role
	RoleRef kapi.ObjectReference
}
// +genclient=true

// Policy is a object that holds all the Roles for a particular namespace. There is at most
// one Policy document per namespace.
type Policy struct {
	unversioned.TypeMeta
	// Standard object's metadata.
	kapi.ObjectMeta

	// LastModified is the last time that any part of the Policy was created, updated, or deleted
	LastModified unversioned.Time

	// Roles holds all the Roles held by this Policy, mapped by Role.Name
	Roles map[string]*Role
}
// PolicyBinding is a object that holds all the RoleBindings for a particular namespace. There is
// one PolicyBinding document per referenced Policy namespace
type PolicyBinding struct {
unversioned.TypeMeta
// Standard object's metadata.
kapi.ObjectMeta
// LastModified is the last time that any part of the PolicyBinding was created, updated, or deleted
LastModified unversioned.Time
// PolicyRef is a reference to the Policy that contains all the Roles that this PolicyBinding's RoleBindings may reference
PolicyRef kapi.ObjectReference
// RoleBindings holds all the RoleBindings held by this PolicyBinding, mapped by RoleBinding.Name
RoleBindings map[string]*RoleBinding
}
// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace
type SelfSubjectRulesReview struct {
    unversioned.TypeMeta
    // Spec adds information about how to conduct the check
    Spec SelfSubjectRulesReviewSpec
    // Status is completed by the server to tell which permissions you have
    Status SubjectRulesReviewStatus
}

// SelfSubjectRulesReviewSpec adds information about how to conduct the check
type SelfSubjectRulesReviewSpec struct {
    // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
    // Nil for a self-SubjectRulesReview, means "use the scopes on this request".
    // Nil for a regular SubjectRulesReview, means the same as empty.
    Scopes []string
}

// SubjectRulesReviewStatus contains the result of a rules check
type SubjectRulesReviewStatus struct {
    // Rules is the list of rules (no particular sort) that are allowed for the subject
    Rules []PolicyRule
    // EvaluationError can appear in combination with Rules. It means some error happened during evaluation
    // that may have prevented additional rules from being populated.
    EvaluationError string
}
// ResourceAccessReviewResponse describes who can perform the action
type ResourceAccessReviewResponse struct {
    unversioned.TypeMeta
    // Namespace is the namespace used for the access review
    Namespace string
    // Users is the list of users who can perform the action
    Users sets.String
    // Groups is the list of groups who can perform the action
    Groups sets.String
    // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned.
    // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is
    // most common when a bound role is missing, but enough roles are still present and bound to reason about the request.
    EvaluationError string
}

// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the
// action specified by spec
type ResourceAccessReview struct {
    unversioned.TypeMeta
    // Action describes the action being tested
    Action AuthorizationAttributes
}

// SubjectAccessReviewResponse describes whether or not a user or group can perform an action
type SubjectAccessReviewResponse struct {
    unversioned.TypeMeta
    // Namespace is the namespace used for the access review
    Namespace string
    // Allowed is required. True if the action would be allowed, false otherwise.
    Allowed bool
    // Reason is optional. It indicates why a request was allowed or denied.
    Reason string
}
// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action
type SubjectAccessReview struct {
    unversioned.TypeMeta
    // Action describes the action being tested
    Action AuthorizationAttributes
    // User is optional. If both User and Groups are empty, the current authenticated user is used.
    User string
    // Groups is optional. Groups is the list of groups to which the User belongs.
    Groups sets.String
    // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
    // Nil for a self-SAR, means "use the scopes on this request".
    // Nil for a regular SAR, means the same as empty.
    Scopes []string
}

// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace
type LocalResourceAccessReview struct {
    unversioned.TypeMeta
    // Action describes the action being tested
    Action AuthorizationAttributes
}

// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace
type LocalSubjectAccessReview struct {
    unversioned.TypeMeta
    // Action describes the action being tested. The Namespace element is FORCED to the current namespace.
    Action AuthorizationAttributes
    // User is optional. If both User and Groups are empty, the current authenticated user is used.
    User string
    // Groups is optional. Groups is the list of groups to which the User belongs.
    Groups sets.String
    // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups".
    // Nil for a self-SAR, means "use the scopes on this request".
    // Nil for a regular SAR, means the same as empty.
    Scopes []string
}
// AuthorizationAttributes describes a request to be authorized
type AuthorizationAttributes struct {
    // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
    Namespace string
    // Verb is one of: get, list, watch, create, update, delete
    Verb string
    // Group is the API group of the resource
    Group string
    // Version is the API version of the resource
    Version string
    // Resource is one of the existing resource types
    Resource string
    // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete"
    ResourceName string
    // Content is the actual content of the request for create and update
    Content kruntime.Object
}
// PolicyList is a collection of Policies
type PolicyList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of policies
    Items []Policy
}

// PolicyBindingList is a collection of PolicyBindings
type PolicyBindingList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of policyBindings
    Items []PolicyBinding
}

// RoleBindingList is a collection of RoleBindings
type RoleBindingList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of roleBindings
    Items []RoleBinding
}

// RoleList is a collection of Roles
type RoleList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of roles
    Items []Role
}
// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.
type ClusterRole struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    kapi.ObjectMeta
    // Rules holds all the PolicyRules for this ClusterRole
    Rules []PolicyRule
}

// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace.
// It adds who information via Users and Groups and namespace information by which namespace it exists in. ClusterRoleBindings in a given
// namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).
type ClusterRoleBinding struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    kapi.ObjectMeta
    // Subjects holds object references to authorize with this rule
    Subjects []kapi.ObjectReference
    // RoleRef can only reference the current namespace and the global namespace
    // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error.
    // Since Policy is a singleton, this is sufficient knowledge to locate a role
    RoleRef kapi.ObjectReference
}

// ClusterPolicy is an object that holds all the ClusterRoles for a particular namespace. There is at most
// one ClusterPolicy document per namespace.
type ClusterPolicy struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    kapi.ObjectMeta
    // LastModified is the last time that any part of the ClusterPolicy was created, updated, or deleted
    LastModified unversioned.Time
    // Roles holds all the ClusterRoles held by this ClusterPolicy, mapped by Role.Name
    Roles map[string]*ClusterRole
}
// ClusterPolicyBinding is an object that holds all the ClusterRoleBindings for a particular namespace. There is
// one ClusterPolicyBinding document per referenced ClusterPolicy namespace
type ClusterPolicyBinding struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    kapi.ObjectMeta
    // LastModified is the last time that any part of the ClusterPolicyBinding was created, updated, or deleted
    LastModified unversioned.Time
    // PolicyRef is a reference to the ClusterPolicy that contains all the ClusterRoles that this ClusterPolicyBinding's RoleBindings may reference
    PolicyRef kapi.ObjectReference
    // RoleBindings holds all the RoleBindings held by this ClusterPolicyBinding, mapped by RoleBinding.Name
    RoleBindings map[string]*ClusterRoleBinding
}

// ClusterPolicyList is a collection of ClusterPolicies
type ClusterPolicyList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of ClusterPolicies
    Items []ClusterPolicy
}

// ClusterPolicyBindingList is a collection of ClusterPolicyBindings
type ClusterPolicyBindingList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of ClusterPolicyBindings
    Items []ClusterPolicyBinding
}

// ClusterRoleBindingList is a collection of ClusterRoleBindings
type ClusterRoleBindingList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of ClusterRoleBindings
    Items []ClusterRoleBinding
}

// ClusterRoleList is a collection of ClusterRoles
type ClusterRoleList struct {
    unversioned.TypeMeta
    // Standard object's metadata.
    unversioned.ListMeta
    // Items is a list of ClusterRoles
    Items []ClusterRole
}

View File

@ -0,0 +1,937 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package api
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
)
// init registers every generated deep-copy function with the global API
// scheme. The registration itself validates each function's signature;
// a malformed function is reported immediately via panic at startup.
func init() {
    if err := api.Scheme.AddGeneratedDeepCopyFuncs(
        DeepCopy_api_BinaryBuildRequestOptions,
        DeepCopy_api_BinaryBuildSource,
        DeepCopy_api_Build,
        DeepCopy_api_BuildConfig,
        DeepCopy_api_BuildConfigList,
        DeepCopy_api_BuildConfigSpec,
        DeepCopy_api_BuildConfigStatus,
        DeepCopy_api_BuildList,
        DeepCopy_api_BuildLog,
        DeepCopy_api_BuildLogOptions,
        DeepCopy_api_BuildOutput,
        DeepCopy_api_BuildPostCommitSpec,
        DeepCopy_api_BuildRequest,
        DeepCopy_api_BuildSource,
        DeepCopy_api_BuildSpec,
        DeepCopy_api_BuildStatus,
        DeepCopy_api_BuildStrategy,
        DeepCopy_api_BuildTriggerCause,
        DeepCopy_api_BuildTriggerPolicy,
        DeepCopy_api_CommonSpec,
        DeepCopy_api_CustomBuildStrategy,
        DeepCopy_api_DockerBuildStrategy,
        DeepCopy_api_GenericWebHookCause,
        DeepCopy_api_GenericWebHookEvent,
        DeepCopy_api_GitBuildSource,
        DeepCopy_api_GitHubWebHookCause,
        DeepCopy_api_GitInfo,
        DeepCopy_api_GitRefInfo,
        DeepCopy_api_GitSourceRevision,
        DeepCopy_api_ImageChangeCause,
        DeepCopy_api_ImageChangeTrigger,
        DeepCopy_api_ImageSource,
        DeepCopy_api_ImageSourcePath,
        DeepCopy_api_JenkinsPipelineBuildStrategy,
        DeepCopy_api_SecretBuildSource,
        DeepCopy_api_SecretSpec,
        DeepCopy_api_SourceBuildStrategy,
        DeepCopy_api_SourceControlUser,
        DeepCopy_api_SourceRevision,
        DeepCopy_api_WebHookTrigger,
    ); err != nil {
        // if one of the deep copy functions is malformed, detect it immediately.
        panic(err)
    }
}
// DeepCopy_api_BinaryBuildRequestOptions copies in into out, deep-copying
// the embedded TypeMeta/ObjectMeta and copying all scalar fields.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_BinaryBuildRequestOptions(in BinaryBuildRequestOptions, out *BinaryBuildRequestOptions, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
        return err
    }
    out.AsFile = in.AsFile
    out.Commit = in.Commit
    out.Message = in.Message
    out.AuthorName = in.AuthorName
    out.AuthorEmail = in.AuthorEmail
    out.CommitterName = in.CommitterName
    out.CommitterEmail = in.CommitterEmail
    return nil
}

// DeepCopy_api_BinaryBuildSource copies in into out (single scalar field).
func DeepCopy_api_BinaryBuildSource(in BinaryBuildSource, out *BinaryBuildSource, c *conversion.Cloner) error {
    out.AsFile = in.AsFile
    return nil
}

// DeepCopy_api_Build copies in into out, deep-copying metadata, Spec and Status.
func DeepCopy_api_Build(in Build, out *Build, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildSpec(in.Spec, &out.Spec, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildStatus(in.Status, &out.Status, c); err != nil {
        return err
    }
    return nil
}

// DeepCopy_api_BuildConfig copies in into out, deep-copying metadata, Spec and Status.
func DeepCopy_api_BuildConfig(in BuildConfig, out *BuildConfig, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildConfigSpec(in.Spec, &out.Spec, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildConfigStatus(in.Status, &out.Status, c); err != nil {
        return err
    }
    return nil
}

// DeepCopy_api_BuildConfigList copies in into out, element-wise deep-copying Items.
func DeepCopy_api_BuildConfigList(in BuildConfigList, out *BuildConfigList, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
        return err
    }
    if in.Items != nil {
        // shadow in/out with the slice and its destination to keep the loop terse
        in, out := in.Items, &out.Items
        *out = make([]BuildConfig, len(in))
        for i := range in {
            if err := DeepCopy_api_BuildConfig(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Items = nil
    }
    return nil
}

// DeepCopy_api_BuildConfigSpec copies in into out, deep-copying Triggers and CommonSpec.
func DeepCopy_api_BuildConfigSpec(in BuildConfigSpec, out *BuildConfigSpec, c *conversion.Cloner) error {
    if in.Triggers != nil {
        in, out := in.Triggers, &out.Triggers
        *out = make([]BuildTriggerPolicy, len(in))
        for i := range in {
            if err := DeepCopy_api_BuildTriggerPolicy(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Triggers = nil
    }
    out.RunPolicy = in.RunPolicy
    if err := DeepCopy_api_CommonSpec(in.CommonSpec, &out.CommonSpec, c); err != nil {
        return err
    }
    return nil
}

// DeepCopy_api_BuildConfigStatus copies in into out (single scalar field).
func DeepCopy_api_BuildConfigStatus(in BuildConfigStatus, out *BuildConfigStatus, c *conversion.Cloner) error {
    out.LastVersion = in.LastVersion
    return nil
}

// DeepCopy_api_BuildList copies in into out, element-wise deep-copying Items.
func DeepCopy_api_BuildList(in BuildList, out *BuildList, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
        return err
    }
    if in.Items != nil {
        in, out := in.Items, &out.Items
        *out = make([]Build, len(in))
        for i := range in {
            if err := DeepCopy_api_Build(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Items = nil
    }
    return nil
}

// DeepCopy_api_BuildLog copies in into out (TypeMeta only).
func DeepCopy_api_BuildLog(in BuildLog, out *BuildLog, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    return nil
}

// DeepCopy_api_BuildLogOptions copies in into out, allocating fresh storage
// for every pointer field so out shares no memory with in.
func DeepCopy_api_BuildLogOptions(in BuildLogOptions, out *BuildLogOptions, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    out.Container = in.Container
    out.Follow = in.Follow
    out.Previous = in.Previous
    if in.SinceSeconds != nil {
        in, out := in.SinceSeconds, &out.SinceSeconds
        *out = new(int64)
        **out = *in
    } else {
        out.SinceSeconds = nil
    }
    if in.SinceTime != nil {
        in, out := in.SinceTime, &out.SinceTime
        *out = new(unversioned.Time)
        if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.SinceTime = nil
    }
    out.Timestamps = in.Timestamps
    if in.TailLines != nil {
        in, out := in.TailLines, &out.TailLines
        *out = new(int64)
        **out = *in
    } else {
        out.TailLines = nil
    }
    if in.LimitBytes != nil {
        in, out := in.LimitBytes, &out.LimitBytes
        *out = new(int64)
        **out = *in
    } else {
        out.LimitBytes = nil
    }
    out.NoWait = in.NoWait
    if in.Version != nil {
        in, out := in.Version, &out.Version
        *out = new(int64)
        **out = *in
    } else {
        out.Version = nil
    }
    return nil
}
// DeepCopy_api_BuildOutput copies in into out, allocating new referents for
// the optional To and PushSecret pointers.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_BuildOutput(in BuildOutput, out *BuildOutput, c *conversion.Cloner) error {
    if in.To != nil {
        in, out := in.To, &out.To
        *out = new(api.ObjectReference)
        if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.To = nil
    }
    if in.PushSecret != nil {
        in, out := in.PushSecret, &out.PushSecret
        *out = new(api.LocalObjectReference)
        if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.PushSecret = nil
    }
    return nil
}

// DeepCopy_api_BuildPostCommitSpec copies in into out, cloning the Command
// and Args string slices into fresh backing arrays.
func DeepCopy_api_BuildPostCommitSpec(in BuildPostCommitSpec, out *BuildPostCommitSpec, c *conversion.Cloner) error {
    if in.Command != nil {
        in, out := in.Command, &out.Command
        *out = make([]string, len(in))
        copy(*out, in)
    } else {
        out.Command = nil
    }
    if in.Args != nil {
        in, out := in.Args, &out.Args
        *out = make([]string, len(in))
        copy(*out, in)
    } else {
        out.Args = nil
    }
    out.Script = in.Script
    return nil
}

// DeepCopy_api_BuildRequest copies in into out, deep-copying metadata and
// every optional pointer/slice field (Revision, TriggeredByImage, From,
// Binary, LastVersion, Env, TriggeredBy).
func DeepCopy_api_BuildRequest(in BuildRequest, out *BuildRequest, c *conversion.Cloner) error {
    if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
        return err
    }
    if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
        return err
    }
    if in.Revision != nil {
        in, out := in.Revision, &out.Revision
        *out = new(SourceRevision)
        if err := DeepCopy_api_SourceRevision(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Revision = nil
    }
    if in.TriggeredByImage != nil {
        in, out := in.TriggeredByImage, &out.TriggeredByImage
        *out = new(api.ObjectReference)
        if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.TriggeredByImage = nil
    }
    if in.From != nil {
        in, out := in.From, &out.From
        *out = new(api.ObjectReference)
        if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.From = nil
    }
    if in.Binary != nil {
        in, out := in.Binary, &out.Binary
        *out = new(BinaryBuildSource)
        if err := DeepCopy_api_BinaryBuildSource(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Binary = nil
    }
    if in.LastVersion != nil {
        in, out := in.LastVersion, &out.LastVersion
        *out = new(int64)
        **out = *in
    } else {
        out.LastVersion = nil
    }
    if in.Env != nil {
        in, out := in.Env, &out.Env
        *out = make([]api.EnvVar, len(in))
        for i := range in {
            if err := api.DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Env = nil
    }
    if in.TriggeredBy != nil {
        in, out := in.TriggeredBy, &out.TriggeredBy
        *out = make([]BuildTriggerCause, len(in))
        for i := range in {
            if err := DeepCopy_api_BuildTriggerCause(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.TriggeredBy = nil
    }
    return nil
}
// DeepCopy_api_BuildSource copies in into out, deep-copying the optional
// Binary/Dockerfile/Git pointers and the Images/Secrets slices.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_BuildSource(in BuildSource, out *BuildSource, c *conversion.Cloner) error {
    if in.Binary != nil {
        in, out := in.Binary, &out.Binary
        *out = new(BinaryBuildSource)
        if err := DeepCopy_api_BinaryBuildSource(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Binary = nil
    }
    if in.Dockerfile != nil {
        in, out := in.Dockerfile, &out.Dockerfile
        *out = new(string)
        **out = *in
    } else {
        out.Dockerfile = nil
    }
    if in.Git != nil {
        in, out := in.Git, &out.Git
        *out = new(GitBuildSource)
        if err := DeepCopy_api_GitBuildSource(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Git = nil
    }
    if in.Images != nil {
        in, out := in.Images, &out.Images
        *out = make([]ImageSource, len(in))
        for i := range in {
            if err := DeepCopy_api_ImageSource(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Images = nil
    }
    out.ContextDir = in.ContextDir
    if in.SourceSecret != nil {
        in, out := in.SourceSecret, &out.SourceSecret
        *out = new(api.LocalObjectReference)
        if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.SourceSecret = nil
    }
    if in.Secrets != nil {
        in, out := in.Secrets, &out.Secrets
        *out = make([]SecretBuildSource, len(in))
        for i := range in {
            if err := DeepCopy_api_SecretBuildSource(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Secrets = nil
    }
    return nil
}

// DeepCopy_api_BuildSpec copies in into out, deep-copying CommonSpec and the
// TriggeredBy slice.
func DeepCopy_api_BuildSpec(in BuildSpec, out *BuildSpec, c *conversion.Cloner) error {
    if err := DeepCopy_api_CommonSpec(in.CommonSpec, &out.CommonSpec, c); err != nil {
        return err
    }
    if in.TriggeredBy != nil {
        in, out := in.TriggeredBy, &out.TriggeredBy
        *out = make([]BuildTriggerCause, len(in))
        for i := range in {
            if err := DeepCopy_api_BuildTriggerCause(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.TriggeredBy = nil
    }
    return nil
}

// DeepCopy_api_BuildStatus copies in into out, allocating fresh Time values
// for the optional start/completion timestamps and a fresh Config reference.
func DeepCopy_api_BuildStatus(in BuildStatus, out *BuildStatus, c *conversion.Cloner) error {
    out.Phase = in.Phase
    out.Cancelled = in.Cancelled
    out.Reason = in.Reason
    out.Message = in.Message
    if in.StartTimestamp != nil {
        in, out := in.StartTimestamp, &out.StartTimestamp
        *out = new(unversioned.Time)
        if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.StartTimestamp = nil
    }
    if in.CompletionTimestamp != nil {
        in, out := in.CompletionTimestamp, &out.CompletionTimestamp
        *out = new(unversioned.Time)
        if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.CompletionTimestamp = nil
    }
    out.Duration = in.Duration
    out.OutputDockerImageReference = in.OutputDockerImageReference
    if in.Config != nil {
        in, out := in.Config, &out.Config
        *out = new(api.ObjectReference)
        if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Config = nil
    }
    return nil
}
// DeepCopy_api_BuildStrategy copies in into out, deep-copying whichever of
// the four optional strategy pointers are set.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_BuildStrategy(in BuildStrategy, out *BuildStrategy, c *conversion.Cloner) error {
    if in.DockerStrategy != nil {
        in, out := in.DockerStrategy, &out.DockerStrategy
        *out = new(DockerBuildStrategy)
        if err := DeepCopy_api_DockerBuildStrategy(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.DockerStrategy = nil
    }
    if in.SourceStrategy != nil {
        in, out := in.SourceStrategy, &out.SourceStrategy
        *out = new(SourceBuildStrategy)
        if err := DeepCopy_api_SourceBuildStrategy(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.SourceStrategy = nil
    }
    if in.CustomStrategy != nil {
        in, out := in.CustomStrategy, &out.CustomStrategy
        *out = new(CustomBuildStrategy)
        if err := DeepCopy_api_CustomBuildStrategy(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.CustomStrategy = nil
    }
    if in.JenkinsPipelineStrategy != nil {
        in, out := in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy
        *out = new(JenkinsPipelineBuildStrategy)
        if err := DeepCopy_api_JenkinsPipelineBuildStrategy(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.JenkinsPipelineStrategy = nil
    }
    return nil
}

// DeepCopy_api_BuildTriggerCause copies in into out, deep-copying the
// optional webhook/image-change cause pointers.
func DeepCopy_api_BuildTriggerCause(in BuildTriggerCause, out *BuildTriggerCause, c *conversion.Cloner) error {
    out.Message = in.Message
    if in.GenericWebHook != nil {
        in, out := in.GenericWebHook, &out.GenericWebHook
        *out = new(GenericWebHookCause)
        if err := DeepCopy_api_GenericWebHookCause(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.GenericWebHook = nil
    }
    if in.GitHubWebHook != nil {
        in, out := in.GitHubWebHook, &out.GitHubWebHook
        *out = new(GitHubWebHookCause)
        if err := DeepCopy_api_GitHubWebHookCause(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.GitHubWebHook = nil
    }
    if in.ImageChangeBuild != nil {
        in, out := in.ImageChangeBuild, &out.ImageChangeBuild
        *out = new(ImageChangeCause)
        if err := DeepCopy_api_ImageChangeCause(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.ImageChangeBuild = nil
    }
    return nil
}

// DeepCopy_api_BuildTriggerPolicy copies in into out, deep-copying the
// optional webhook/image-change trigger pointers.
func DeepCopy_api_BuildTriggerPolicy(in BuildTriggerPolicy, out *BuildTriggerPolicy, c *conversion.Cloner) error {
    out.Type = in.Type
    if in.GitHubWebHook != nil {
        in, out := in.GitHubWebHook, &out.GitHubWebHook
        *out = new(WebHookTrigger)
        if err := DeepCopy_api_WebHookTrigger(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.GitHubWebHook = nil
    }
    if in.GenericWebHook != nil {
        in, out := in.GenericWebHook, &out.GenericWebHook
        *out = new(WebHookTrigger)
        if err := DeepCopy_api_WebHookTrigger(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.GenericWebHook = nil
    }
    if in.ImageChange != nil {
        in, out := in.ImageChange, &out.ImageChange
        *out = new(ImageChangeTrigger)
        if err := DeepCopy_api_ImageChangeTrigger(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.ImageChange = nil
    }
    return nil
}
// DeepCopy_api_CommonSpec copies in into out, deep-copying Source, Strategy,
// Output, Resources, PostCommit and the optional Revision and
// CompletionDeadlineSeconds pointers.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_CommonSpec(in CommonSpec, out *CommonSpec, c *conversion.Cloner) error {
    out.ServiceAccount = in.ServiceAccount
    if err := DeepCopy_api_BuildSource(in.Source, &out.Source, c); err != nil {
        return err
    }
    if in.Revision != nil {
        in, out := in.Revision, &out.Revision
        *out = new(SourceRevision)
        if err := DeepCopy_api_SourceRevision(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Revision = nil
    }
    if err := DeepCopy_api_BuildStrategy(in.Strategy, &out.Strategy, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildOutput(in.Output, &out.Output, c); err != nil {
        return err
    }
    if err := api.DeepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
        return err
    }
    if err := DeepCopy_api_BuildPostCommitSpec(in.PostCommit, &out.PostCommit, c); err != nil {
        return err
    }
    if in.CompletionDeadlineSeconds != nil {
        in, out := in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds
        *out = new(int64)
        **out = *in
    } else {
        out.CompletionDeadlineSeconds = nil
    }
    return nil
}

// DeepCopy_api_CustomBuildStrategy copies in into out, deep-copying From,
// the optional PullSecret, and the Env and Secrets slices.
func DeepCopy_api_CustomBuildStrategy(in CustomBuildStrategy, out *CustomBuildStrategy, c *conversion.Cloner) error {
    if err := api.DeepCopy_api_ObjectReference(in.From, &out.From, c); err != nil {
        return err
    }
    if in.PullSecret != nil {
        in, out := in.PullSecret, &out.PullSecret
        *out = new(api.LocalObjectReference)
        if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.PullSecret = nil
    }
    if in.Env != nil {
        in, out := in.Env, &out.Env
        *out = make([]api.EnvVar, len(in))
        for i := range in {
            if err := api.DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Env = nil
    }
    out.ExposeDockerSocket = in.ExposeDockerSocket
    out.ForcePull = in.ForcePull
    if in.Secrets != nil {
        in, out := in.Secrets, &out.Secrets
        *out = make([]SecretSpec, len(in))
        for i := range in {
            if err := DeepCopy_api_SecretSpec(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Secrets = nil
    }
    out.BuildAPIVersion = in.BuildAPIVersion
    return nil
}

// DeepCopy_api_DockerBuildStrategy copies in into out, deep-copying the
// optional From and PullSecret pointers and the Env slice.
func DeepCopy_api_DockerBuildStrategy(in DockerBuildStrategy, out *DockerBuildStrategy, c *conversion.Cloner) error {
    if in.From != nil {
        in, out := in.From, &out.From
        *out = new(api.ObjectReference)
        if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.From = nil
    }
    if in.PullSecret != nil {
        in, out := in.PullSecret, &out.PullSecret
        *out = new(api.LocalObjectReference)
        if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.PullSecret = nil
    }
    out.NoCache = in.NoCache
    if in.Env != nil {
        in, out := in.Env, &out.Env
        *out = make([]api.EnvVar, len(in))
        for i := range in {
            if err := api.DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Env = nil
    }
    out.ForcePull = in.ForcePull
    out.DockerfilePath = in.DockerfilePath
    return nil
}
// DeepCopy_api_GenericWebHookCause copies in into out, deep-copying the
// optional Revision pointer.
// NOTE: autogenerated by deepcopy-gen (see file header) — do not hand-edit logic.
func DeepCopy_api_GenericWebHookCause(in GenericWebHookCause, out *GenericWebHookCause, c *conversion.Cloner) error {
    if in.Revision != nil {
        in, out := in.Revision, &out.Revision
        *out = new(SourceRevision)
        if err := DeepCopy_api_SourceRevision(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Revision = nil
    }
    out.Secret = in.Secret
    return nil
}

// DeepCopy_api_GenericWebHookEvent copies in into out, deep-copying the
// optional Git pointer and the Env slice.
func DeepCopy_api_GenericWebHookEvent(in GenericWebHookEvent, out *GenericWebHookEvent, c *conversion.Cloner) error {
    if in.Git != nil {
        in, out := in.Git, &out.Git
        *out = new(GitInfo)
        if err := DeepCopy_api_GitInfo(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Git = nil
    }
    if in.Env != nil {
        in, out := in.Env, &out.Env
        *out = make([]api.EnvVar, len(in))
        for i := range in {
            if err := api.DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Env = nil
    }
    return nil
}

// DeepCopy_api_GitBuildSource copies in into out, allocating fresh strings
// for the optional proxy pointers.
func DeepCopy_api_GitBuildSource(in GitBuildSource, out *GitBuildSource, c *conversion.Cloner) error {
    out.URI = in.URI
    out.Ref = in.Ref
    if in.HTTPProxy != nil {
        in, out := in.HTTPProxy, &out.HTTPProxy
        *out = new(string)
        **out = *in
    } else {
        out.HTTPProxy = nil
    }
    if in.HTTPSProxy != nil {
        in, out := in.HTTPSProxy, &out.HTTPSProxy
        *out = new(string)
        **out = *in
    } else {
        out.HTTPSProxy = nil
    }
    return nil
}

// DeepCopy_api_GitHubWebHookCause copies in into out, deep-copying the
// optional Revision pointer.
func DeepCopy_api_GitHubWebHookCause(in GitHubWebHookCause, out *GitHubWebHookCause, c *conversion.Cloner) error {
    if in.Revision != nil {
        in, out := in.Revision, &out.Revision
        *out = new(SourceRevision)
        if err := DeepCopy_api_SourceRevision(*in, *out, c); err != nil {
            return err
        }
    } else {
        out.Revision = nil
    }
    out.Secret = in.Secret
    return nil
}

// DeepCopy_api_GitInfo copies in into out, deep-copying the embedded source
// and revision structs and the Refs slice.
func DeepCopy_api_GitInfo(in GitInfo, out *GitInfo, c *conversion.Cloner) error {
    if err := DeepCopy_api_GitBuildSource(in.GitBuildSource, &out.GitBuildSource, c); err != nil {
        return err
    }
    if err := DeepCopy_api_GitSourceRevision(in.GitSourceRevision, &out.GitSourceRevision, c); err != nil {
        return err
    }
    if in.Refs != nil {
        in, out := in.Refs, &out.Refs
        *out = make([]GitRefInfo, len(in))
        for i := range in {
            if err := DeepCopy_api_GitRefInfo(in[i], &(*out)[i], c); err != nil {
                return err
            }
        }
    } else {
        out.Refs = nil
    }
    return nil
}

// DeepCopy_api_GitRefInfo copies in into out via its two embedded structs.
func DeepCopy_api_GitRefInfo(in GitRefInfo, out *GitRefInfo, c *conversion.Cloner) error {
    if err := DeepCopy_api_GitBuildSource(in.GitBuildSource, &out.GitBuildSource, c); err != nil {
        return err
    }
    if err := DeepCopy_api_GitSourceRevision(in.GitSourceRevision, &out.GitSourceRevision, c); err != nil {
        return err
    }
    return nil
}

// DeepCopy_api_GitSourceRevision copies in into out, deep-copying the Author
// and Committer users.
func DeepCopy_api_GitSourceRevision(in GitSourceRevision, out *GitSourceRevision, c *conversion.Cloner) error {
    out.Commit = in.Commit
    if err := DeepCopy_api_SourceControlUser(in.Author, &out.Author, c); err != nil {
        return err
    }
    if err := DeepCopy_api_SourceControlUser(in.Committer, &out.Committer, c); err != nil {
        return err
    }
    out.Message = in.Message
    return nil
}
func DeepCopy_api_ImageChangeCause(in ImageChangeCause, out *ImageChangeCause, c *conversion.Cloner) error {
out.ImageID = in.ImageID
if in.FromRef != nil {
in, out := in.FromRef, &out.FromRef
*out = new(api.ObjectReference)
if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
return err
}
} else {
out.FromRef = nil
}
return nil
}
func DeepCopy_api_ImageChangeTrigger(in ImageChangeTrigger, out *ImageChangeTrigger, c *conversion.Cloner) error {
out.LastTriggeredImageID = in.LastTriggeredImageID
if in.From != nil {
in, out := in.From, &out.From
*out = new(api.ObjectReference)
if err := api.DeepCopy_api_ObjectReference(*in, *out, c); err != nil {
return err
}
} else {
out.From = nil
}
return nil
}
func DeepCopy_api_ImageSource(in ImageSource, out *ImageSource, c *conversion.Cloner) error {
if err := api.DeepCopy_api_ObjectReference(in.From, &out.From, c); err != nil {
return err
}
if in.Paths != nil {
in, out := in.Paths, &out.Paths
*out = make([]ImageSourcePath, len(in))
for i := range in {
if err := DeepCopy_api_ImageSourcePath(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Paths = nil
}
if in.PullSecret != nil {
in, out := in.PullSecret, &out.PullSecret
*out = new(api.LocalObjectReference)
if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil {
return err
}
} else {
out.PullSecret = nil
}
return nil
}
func DeepCopy_api_ImageSourcePath(in ImageSourcePath, out *ImageSourcePath, c *conversion.Cloner) error {
out.SourcePath = in.SourcePath
out.DestinationDir = in.DestinationDir
return nil
}
// DeepCopy_api_JenkinsPipelineBuildStrategy copies both Jenkinsfile fields from in to out.
func DeepCopy_api_JenkinsPipelineBuildStrategy(in JenkinsPipelineBuildStrategy, out *JenkinsPipelineBuildStrategy, c *conversion.Cloner) error {
	out.JenkinsfilePath, out.Jenkinsfile = in.JenkinsfilePath, in.Jenkinsfile
	return nil
}
// DeepCopy_api_SecretBuildSource copies the secret reference and destination directory from in to out.
func DeepCopy_api_SecretBuildSource(in SecretBuildSource, out *SecretBuildSource, c *conversion.Cloner) error {
	err := api.DeepCopy_api_LocalObjectReference(in.Secret, &out.Secret, c)
	if err != nil {
		return err
	}
	out.DestinationDir = in.DestinationDir
	return nil
}
// DeepCopy_api_SecretSpec copies the secret source reference and mount path from in to out.
func DeepCopy_api_SecretSpec(in SecretSpec, out *SecretSpec, c *conversion.Cloner) error {
	err := api.DeepCopy_api_LocalObjectReference(in.SecretSource, &out.SecretSource, c)
	if err != nil {
		return err
	}
	out.MountPath = in.MountPath
	return nil
}
// DeepCopy_api_SourceBuildStrategy copies in to out, deep-copying the optional
// PullSecret pointer and the Env slice.
func DeepCopy_api_SourceBuildStrategy(in SourceBuildStrategy, out *SourceBuildStrategy, c *conversion.Cloner) error {
	if err := api.DeepCopy_api_ObjectReference(in.From, &out.From, c); err != nil {
		return err
	}
	out.PullSecret = nil
	if in.PullSecret != nil {
		out.PullSecret = new(api.LocalObjectReference)
		if err := api.DeepCopy_api_LocalObjectReference(*in.PullSecret, out.PullSecret, c); err != nil {
			return err
		}
	}
	out.Env = nil
	if in.Env != nil {
		out.Env = make([]api.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := api.DeepCopy_api_EnvVar(in.Env[i], &out.Env[i], c); err != nil {
				return err
			}
		}
	}
	out.Scripts = in.Scripts
	out.Incremental = in.Incremental
	out.ForcePull = in.ForcePull
	return nil
}
// DeepCopy_api_SourceControlUser copies the name and email fields from in to out.
func DeepCopy_api_SourceControlUser(in SourceControlUser, out *SourceControlUser, c *conversion.Cloner) error {
	out.Name, out.Email = in.Name, in.Email
	return nil
}
// DeepCopy_api_SourceRevision copies in to out, cloning the optional Git revision pointer.
func DeepCopy_api_SourceRevision(in SourceRevision, out *SourceRevision, c *conversion.Cloner) error {
	if in.Git == nil {
		out.Git = nil
		return nil
	}
	// Allocate a fresh GitSourceRevision so out never aliases in's pointer.
	out.Git = new(GitSourceRevision)
	return DeepCopy_api_GitSourceRevision(*in.Git, out.Git, c)
}
// DeepCopy_api_WebHookTrigger copies the webhook secret and env policy from in to out.
func DeepCopy_api_WebHookTrigger(in WebHookTrigger, out *WebHookTrigger, c *conversion.Cloner) error {
	out.Secret, out.AllowEnv = in.Secret, in.AllowEnv
	return nil
}

View File

@ -0,0 +1,23 @@
package api
import "k8s.io/kubernetes/pkg/fields"
// BuildToSelectableFields returns a field set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func BuildToSelectableFields(build *Build) fields.Set {
	set := fields.Set{
		"metadata.name":      build.Name,
		"metadata.namespace": build.Namespace,
	}
	set["status"] = string(build.Status.Phase)
	set["podName"] = GetBuildPodName(build)
	return set
}
// BuildConfigToSelectableFields returns a field set that represents the object.
// Changes to the returned keys require registering conversions for existing
// versions using Scheme.AddFieldLabelConversionFunc.
func BuildConfigToSelectableFields(buildConfig *BuildConfig) fields.Set {
	set := make(fields.Set, 2)
	set["metadata.name"] = buildConfig.Name
	set["metadata.namespace"] = buildConfig.Namespace
	return set
}

View File

@ -0,0 +1,61 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
)
// BuildToPodLogOptions builds a PodLogOptions object out of a BuildLogOptions.
// Currently BuildLogOptions.Container and BuildLogOptions.Previous aren't used
// so they won't be copied to PodLogOptions.
func BuildToPodLogOptions(opts *BuildLogOptions) *kapi.PodLogOptions {
	podLogOptions := &kapi.PodLogOptions{}
	podLogOptions.Follow = opts.Follow
	podLogOptions.SinceSeconds = opts.SinceSeconds
	podLogOptions.SinceTime = opts.SinceTime
	podLogOptions.Timestamps = opts.Timestamps
	podLogOptions.TailLines = opts.TailLines
	podLogOptions.LimitBytes = opts.LimitBytes
	return podLogOptions
}
// PredicateFunc tests an argument and decides whether it meets some criteria.
// It can be used for filtering elements based on some conditions.
type PredicateFunc func(interface{}) bool
// FilterBuilds returns the subset of builds that satisfies the predicate
// function. A nil or empty input is returned unchanged.
func FilterBuilds(builds []Build, predicate PredicateFunc) []Build {
	if len(builds) == 0 {
		return builds
	}
	// Pre-size the capacity to the input length: the result can never be
	// larger than the input, and this avoids repeated reallocation while
	// appending.
	result := make([]Build, 0, len(builds))
	for _, build := range builds {
		if predicate(build) {
			result = append(result, build)
		}
	}
	return result
}
// ByBuildConfigPredicate matches all builds that have a build config
// annotation or label with the specified value.
func ByBuildConfigPredicate(labelValue string) PredicateFunc {
	return func(arg interface{}) bool {
		// Perform the type assertion once instead of once per check; the
		// assertion still panics on non-Build arguments, as before.
		build := arg.(Build)
		return hasBuildConfigAnnotation(build, BuildConfigAnnotation, labelValue) ||
			hasBuildConfigLabel(build, BuildConfigLabel, labelValue) ||
			hasBuildConfigLabel(build, BuildConfigLabelDeprecated, labelValue)
	}
}
// hasBuildConfigLabel reports whether build carries the given label with the
// given value.
func hasBuildConfigLabel(build Build, labelName, labelValue string) bool {
	if value, ok := build.Labels[labelName]; ok {
		return value == labelValue
	}
	return false
}
// hasBuildConfigAnnotation reports whether build carries the given annotation
// with the given value.
func hasBuildConfigAnnotation(build Build, annotationName, annotationValue string) bool {
	// Indexing a nil map safely yields the zero value in Go, so the explicit
	// nil check the original carried is redundant; this also matches the
	// style of hasBuildConfigLabel above.
	value, ok := build.Annotations[annotationName]
	return ok && value == annotationValue
}

View File

@ -0,0 +1,49 @@
package api
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
func Kind(kind string) unversioned.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}
// Resource takes an unqualified resource and returns back a Group qualified GroupResource.
func Resource(resource string) unversioned.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}
// AddToScheme registers this package's known build types with the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	// Add the API to Scheme.
	addKnownTypes(scheme)
}
// addKnownTypes adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Build{},
		&BuildList{},
		&BuildConfig{},
		&BuildConfigList{},
		&BuildLog{},
		&BuildRequest{},
		&BuildLogOptions{},
		&BinaryBuildRequestOptions{},
	)
}
// GetObjectKind exposes each object's TypeMeta so the scheme can read and
// write its API group/version/kind.
func (obj *Build) GetObjectKind() unversioned.ObjectKind                     { return &obj.TypeMeta }
func (obj *BuildList) GetObjectKind() unversioned.ObjectKind                 { return &obj.TypeMeta }
func (obj *BuildConfig) GetObjectKind() unversioned.ObjectKind               { return &obj.TypeMeta }
func (obj *BuildConfigList) GetObjectKind() unversioned.ObjectKind           { return &obj.TypeMeta }
func (obj *BuildLog) GetObjectKind() unversioned.ObjectKind                  { return &obj.TypeMeta }
func (obj *BuildRequest) GetObjectKind() unversioned.ObjectKind              { return &obj.TypeMeta }
func (obj *BuildLogOptions) GetObjectKind() unversioned.ObjectKind           { return &obj.TypeMeta }
func (obj *BinaryBuildRequestOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -0,0 +1,33 @@
package api
// BuildSliceByCreationTimestamp implements sort.Interface for []Build
// based on the CreationTimestamp field.
type BuildSliceByCreationTimestamp []Build

// Len returns the number of builds in the slice.
func (b BuildSliceByCreationTimestamp) Len() int {
	return len(b)
}

// Less orders builds by ascending creation time.
func (b BuildSliceByCreationTimestamp) Less(i, j int) bool {
	return b[i].CreationTimestamp.Before(b[j].CreationTimestamp)
}

// Swap exchanges the builds at indexes i and j.
func (b BuildSliceByCreationTimestamp) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}
// BuildPtrSliceByCreationTimestamp implements sort.Interface for []*Build
// based on the CreationTimestamp field.
type BuildPtrSliceByCreationTimestamp []*Build

// Len returns the number of build pointers in the slice.
func (b BuildPtrSliceByCreationTimestamp) Len() int {
	return len(b)
}

// Less orders builds by ascending creation time.
func (b BuildPtrSliceByCreationTimestamp) Less(i, j int) bool {
	return b[i].CreationTimestamp.Before(b[j].CreationTimestamp)
}

// Swap exchanges the build pointers at indexes i and j.
func (b BuildPtrSliceByCreationTimestamp) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

View File

@ -0,0 +1,915 @@
package api
import (
"time"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/util/sets"
)
const (
	// BuildAnnotation is an annotation that identifies a Pod as being for a Build
	BuildAnnotation = "openshift.io/build.name"
	// BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from
	BuildConfigAnnotation = "openshift.io/build-config.name"
	// BuildNumberAnnotation is an annotation whose value is the sequential number for this Build
	BuildNumberAnnotation = "openshift.io/build.number"
	// BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from
	BuildCloneAnnotation = "openshift.io/build.clone-of"
	// BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build
	BuildPodNameAnnotation = "openshift.io/build.pod-name"
	// BuildLabel is the key of a Pod label whose value is the Name of a Build which is run.
	// NOTE: The value for this label may not contain the entire Build name because it will be
	// truncated to maximum label length.
	BuildLabel = "openshift.io/build.name"
	// BuildRunPolicyLabel represents the start policy used to start the build.
	BuildRunPolicyLabel = "openshift.io/build.start-policy"
	// DefaultDockerLabelNamespace is the key of a Build label, whose values are build metadata.
	DefaultDockerLabelNamespace = "io.openshift."
	// OriginVersion is an environment variable key that indicates the version of origin that
	// created this build definition.
	OriginVersion = "ORIGIN_VERSION"
	// AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in
	// Source builder images
	AllowedUIDs = "ALLOWED_UIDS"
	// DropCapabilities is an environment variable that contains a list of capabilities to drop when
	// executing a Source build
	DropCapabilities = "DROP_CAPS"
	// BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig
	// on which the Build is based. NOTE: The value for this label may not contain the entire
	// BuildConfig name because it will be truncated to maximum label length.
	BuildConfigLabel = "openshift.io/build-config.name"
	// BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces.
	// We keep it for backward compatibility.
	BuildConfigLabelDeprecated = "buildconfig"
	// BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused.
	// New Builds cannot be instantiated from a paused BuildConfig.
	BuildConfigPausedAnnotation = "openshift.io/build-config.paused"
)
// +genclient=true
// Build encapsulates the inputs needed to produce a new deployable image, as well as
// the status of the execution and a reference to the Pod which executed the build.
type Build struct {
unversioned.TypeMeta
kapi.ObjectMeta
// Spec is all the inputs used to execute the build.
Spec BuildSpec
// Status is the current status of the build.
Status BuildStatus
}
// BuildSpec encapsulates all the inputs necessary to represent a build.
type BuildSpec struct {
CommonSpec
// TriggeredBy describes which triggers started the most recent update to the
// build configuration and contains information about those triggers.
TriggeredBy []BuildTriggerCause
}
// CommonSpec encapsulates all common fields between Build and BuildConfig.
type CommonSpec struct {
// ServiceAccount is the name of the ServiceAccount to use to run the pod
// created by this build.
// The pod will be allowed to use secrets referenced by the ServiceAccount.
ServiceAccount string
// Source describes the SCM in use.
Source BuildSource
// Revision is the information from the source for a specific repo
// snapshot.
// This is optional.
Revision *SourceRevision
// Strategy defines how to perform a build.
Strategy BuildStrategy
// Output describes the Docker image the Strategy should produce.
Output BuildOutput
// Resources computes resource requirements to execute the build.
Resources kapi.ResourceRequirements
// PostCommit is a build hook executed after the build output image is
// committed, before it is pushed to a registry.
PostCommit BuildPostCommitSpec
// CompletionDeadlineSeconds is an optional duration in seconds, counted from
// the time when a build pod gets scheduled in the system, that the build may
// be active on a node before the system actively tries to terminate the
// build; value must be positive integer.
CompletionDeadlineSeconds *int64
}
// BuildTriggerCause holds information about a triggered build. It is used for
// displaying build trigger data for each build and build configuration in oc
// describe. It is also used to describe which triggers led to the most recent
// update in the build configuration.
type BuildTriggerCause struct {
	// Message is used to store a human readable message for why the build was
	// triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
	Message string
	// GenericWebHook represents data for a generic webhook that fired a
	// specific build.
	GenericWebHook *GenericWebHookCause
	// GitHubWebHook represents data for a GitHub webhook that fired a specific
	// build.
	GitHubWebHook *GitHubWebHookCause
	// ImageChangeBuild stores information about an imagechange event that
	// triggered a new build.
	ImageChangeBuild *ImageChangeCause
}
// GenericWebHookCause holds information about a generic WebHook that
// triggered a build.
type GenericWebHookCause struct {
// Revision is an optional field that stores the git source revision
// information of the generic webhook trigger when it is available.
Revision *SourceRevision
// Secret is the obfuscated webhook secret that triggered a build.
Secret string
}
// GitHubWebHookCause has information about a GitHub webhook that triggered a
// build.
type GitHubWebHookCause struct {
// Revision is the git source revision information of the trigger.
Revision *SourceRevision
// Secret is the obfuscated webhook secret that triggered a build.
Secret string
}
// ImageChangeCause contains information about the image that triggered a
// build.
type ImageChangeCause struct {
	// ImageID is the ID of the image that triggered a new build.
	ImageID string
	// FromRef contains detailed information about an image that triggered a
	// build.
	FromRef *kapi.ObjectReference
}
// BuildStatus contains the status of a build
type BuildStatus struct {
// Phase is the point in the build lifecycle.
Phase BuildPhase
// Cancelled describes if a cancel event was triggered for the build.
Cancelled bool
// Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
Reason StatusReason
// Message is a human-readable message indicating details about why the build has this status.
Message string
// StartTimestamp is a timestamp representing the server time when this Build started
// running in a Pod.
// It is represented in RFC3339 form and is in UTC.
StartTimestamp *unversioned.Time
// CompletionTimestamp is a timestamp representing the server time when this Build was
// finished, whether that build failed or succeeded. It reflects the time at which
// the Pod running the Build terminated.
// It is represented in RFC3339 form and is in UTC.
CompletionTimestamp *unversioned.Time
// Duration contains time.Duration object describing build time.
Duration time.Duration
// OutputDockerImageReference contains a reference to the Docker image that
// will be built by this build. It's value is computed from
// Build.Spec.Output.To, and should include the registry address, so that
// it can be used to push and pull the image.
OutputDockerImageReference string
// Config is an ObjectReference to the BuildConfig this Build is based on.
Config *kapi.ObjectReference
}
// BuildPhase represents the status of a build at a point in time.
type BuildPhase string
// Valid values for BuildPhase.
const (
// BuildPhaseNew is automatically assigned to a newly created build.
BuildPhaseNew BuildPhase = "New"
// BuildPhasePending indicates that a pod name has been assigned and a build is
// about to start running.
BuildPhasePending BuildPhase = "Pending"
// BuildPhaseRunning indicates that a pod has been created and a build is running.
BuildPhaseRunning BuildPhase = "Running"
// BuildPhaseComplete indicates that a build has been successful.
BuildPhaseComplete BuildPhase = "Complete"
// BuildPhaseFailed indicates that a build has executed and failed.
BuildPhaseFailed BuildPhase = "Failed"
// BuildPhaseError indicates that an error prevented the build from executing.
BuildPhaseError BuildPhase = "Error"
// BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
BuildPhaseCancelled BuildPhase = "Cancelled"
)
// StatusReason is a brief CamelCase string that describes a temporary or
// permanent build error condition, meant for machine parsing and tidy display
// in the CLI.
type StatusReason string
// These are the valid reasons of build statuses.
const (
// StatusReasonError is a generic reason for a build error condition.
StatusReasonError StatusReason = "Error"
// StatusReasonCannotCreateBuildPodSpec is an error condition when the build
// strategy cannot create a build pod spec.
StatusReasonCannotCreateBuildPodSpec = "CannotCreateBuildPodSpec"
// StatusReasonCannotCreateBuildPod is an error condition when a build pod
// cannot be created.
StatusReasonCannotCreateBuildPod = "CannotCreateBuildPod"
// StatusReasonInvalidOutputReference is an error condition when the build
// output is an invalid reference.
StatusReasonInvalidOutputReference = "InvalidOutputReference"
// StatusReasonCancelBuildFailed is an error condition when cancelling a build
// fails.
StatusReasonCancelBuildFailed = "CancelBuildFailed"
// StatusReasonBuildPodDeleted is an error condition when the build pod is
// deleted before build completion.
StatusReasonBuildPodDeleted = "BuildPodDeleted"
// StatusReasonExceededRetryTimeout is an error condition when the build has
// not completed and retrying the build times out.
StatusReasonExceededRetryTimeout = "ExceededRetryTimeout"
// StatusReasonMissingPushSecret indicates that the build is missing required
// secret for pushing the output image.
// The build will stay in the pending state until the secret is created, or the build times out.
StatusReasonMissingPushSecret = "MissingPushSecret"
)
// BuildSource is the input used for the build.
type BuildSource struct {
// Binary builds accept a binary as their input. The binary is generally assumed to be a tar,
// gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build
// context and an optional Dockerfile may be specified to override any Dockerfile in the
// build context. For Source builds, this is assumed to be an archive as described above. For
// Source and Docker builds, if binary.asFile is set the build will receive a directory with
// a single file. contextDir may be used when an archive is provided. Custom builds will
// receive this binary as input on STDIN.
Binary *BinaryBuildSource
// Dockerfile is the raw contents of a Dockerfile which should be built. When this option is
// specified, the FROM may be modified based on your strategy base image and additional ENV
// stanzas from your strategy environment will be added after the FROM, but before the rest
// of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
// git - in those cases the Git repo will have any innate Dockerfile replaced in the context
// dir.
Dockerfile *string
// Git contains optional information about git build source
Git *GitBuildSource
// Images describes a set of images to be used to provide source for the build
Images []ImageSource
// ContextDir specifies the sub-directory where the source code for the application exists.
// This allows to have buildable sources in directory other than root of
// repository.
ContextDir string
// SourceSecret is the name of a Secret that would be used for setting
// up the authentication for cloning private repository.
// The secret contains valid credentials for remote repository, where the
// data's key represent the authentication method to be used and value is
// the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
// TODO: This needs to move under the GitBuildSource struct since it's only
// used for git authentication
SourceSecret *kapi.LocalObjectReference
// Secrets represents a list of secrets and their destinations that will
// be used only for the build.
Secrets []SecretBuildSource
}
// ImageSource describes an image that is used as source for the build
type ImageSource struct {
// From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
// copy source from.
From kapi.ObjectReference
// Paths is a list of source and destination paths to copy from the image.
Paths []ImageSourcePath
// PullSecret is a reference to a secret to be used to pull the image from a registry
// If the image is pulled from the OpenShift registry, this field does not need to be set.
PullSecret *kapi.LocalObjectReference
}
// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
type ImageSourcePath struct {
// SourcePath is the absolute path of the file or directory inside the image to
// copy to the build directory.
SourcePath string
// DestinationDir is the relative directory within the build directory
// where files copied from the image are placed.
DestinationDir string
}
// SecretBuildSource describes a secret and its destination directory that will be
// used only at the build time. The content of the secret referenced here will
// be copied into the destination directory instead of mounting.
type SecretBuildSource struct {
// Secret is a reference to an existing secret that you want to use in your
// build.
Secret kapi.LocalObjectReference
// DestinationDir is the directory where the files from the secret should be
// available for the build time.
// For the Source build strategy, these will be injected into a container
// where the assemble script runs. Later, when the script finishes, all files
// injected will be truncated to zero length.
// For the Docker build strategy, these will be copied into the build
// directory, where the Dockerfile is located, so users can ADD or COPY them
// during docker build.
DestinationDir string
}
type BinaryBuildSource struct {
// AsFile indicates that the provided binary input should be considered a single file
// within the build input. For example, specifying "webapp.war" would place the provided
// binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
// strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
// The custom strategy receives this binary as standard input. This filename may not
// contain slashes or be '..' or '.'.
AsFile string
}
// SourceRevision is the revision or commit information from the source for the build
type SourceRevision struct {
// Git contains information about git-based build source
Git *GitSourceRevision
}
// GitSourceRevision is the commit information from a git source for a build
type GitSourceRevision struct {
// Commit is the commit hash identifying a specific commit
Commit string
// Author is the author of a specific commit
Author SourceControlUser
// Committer is the committer of a specific commit
Committer SourceControlUser
// Message is the description of a specific commit
Message string
}
// GitBuildSource defines the parameters of a Git SCM
type GitBuildSource struct {
// URI points to the source that will be built. The structure of the source
// will depend on the type of build to run
URI string
// Ref is the branch/tag/ref to build.
Ref string
// HTTPProxy is a proxy used to reach the git repository over http
HTTPProxy *string
// HTTPSProxy is a proxy used to reach the git repository over https
HTTPSProxy *string
}
// SourceControlUser defines the identity of a user of source control
type SourceControlUser struct {
// Name of the source control user
Name string
// Email of the source control user
Email string
}
// BuildStrategy contains the details of how to perform a build.
type BuildStrategy struct {
// DockerStrategy holds the parameters to the Docker build strategy.
DockerStrategy *DockerBuildStrategy
// SourceStrategy holds the parameters to the Source build strategy.
SourceStrategy *SourceBuildStrategy
// CustomStrategy holds the parameters to the Custom build strategy
CustomStrategy *CustomBuildStrategy
// JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
// This strategy is experimental.
JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy
}
// BuildStrategyType describes a particular way of performing a build.
type BuildStrategyType string
const (
// CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when
// performing a custom build, if needed.
CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE"
)
// CustomBuildStrategy defines input parameters specific to Custom build.
type CustomBuildStrategy struct {
// From is reference to an DockerImage, ImageStream, ImageStreamTag, or ImageStreamImage from which
// the docker image should be pulled
From kapi.ObjectReference
// PullSecret is the name of a Secret that would be used for setting up
// the authentication for pulling the Docker images from the private Docker
// registries
PullSecret *kapi.LocalObjectReference
// Env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar
// ExposeDockerSocket will allow running Docker commands (and build Docker images) from
// inside the Docker container.
// TODO: Allow admins to enforce 'false' for this option
ExposeDockerSocket bool
// ForcePull describes if the controller should configure the build pod to always pull the images
// for the builder or only pull if it is not present locally
ForcePull bool
// Secrets is a list of additional secrets that will be included in the custom build pod
Secrets []SecretSpec
// BuildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
BuildAPIVersion string
}
// DockerBuildStrategy defines input parameters specific to Docker build.
type DockerBuildStrategy struct {
// From is reference to an DockerImage, ImageStream, ImageStreamTag, or ImageStreamImage from which
// the docker image should be pulled
// the resulting image will be used in the FROM line of the Dockerfile for this build.
From *kapi.ObjectReference
// PullSecret is the name of a Secret that would be used for setting up
// the authentication for pulling the Docker images from the private Docker
// registries
PullSecret *kapi.LocalObjectReference
// NoCache if set to true indicates that the docker build must be executed with the
// --no-cache=true flag
NoCache bool
// Env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar
// ForcePull describes if the builder should pull the images from registry prior to building.
ForcePull bool
// DockerfilePath is the path of the Dockerfile that will be used to build the Docker image,
// relative to the root of the context (contextDir).
DockerfilePath string
}
// SourceBuildStrategy defines input parameters specific to a Source build.
type SourceBuildStrategy struct {
	// From is a reference to a DockerImage, ImageStream, ImageStreamTag, or ImageStreamImage from which
	// the docker image should be pulled
	From kapi.ObjectReference
	// PullSecret is the name of a Secret that would be used for setting up
	// the authentication for pulling the Docker images from the private Docker
	// registries
	PullSecret *kapi.LocalObjectReference
	// Env contains additional environment variables you want to pass into a builder container
	Env []kapi.EnvVar
	// Scripts is the location of Source scripts
	Scripts string
	// Incremental flag forces the Source build to do incremental builds if true.
	Incremental bool
	// ForcePull describes if the builder should pull the images from registry prior to building.
	ForcePull bool
}
// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
// This strategy is experimental.
type JenkinsPipelineBuildStrategy struct {
	// JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
	// relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are
	// not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
	JenkinsfilePath string
	// Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
	Jenkinsfile string
}
// A BuildPostCommitSpec holds a build post commit hook specification. The hook
// executes a command in a temporary container running the build output image,
// immediately after the last layer of the image is committed and before the
// image is pushed to a registry. The command is executed with the current
// working directory ($PWD) set to the image's WORKDIR.
//
// The build will be marked as failed if the hook execution fails. It will fail
// if the script or command return a non-zero exit code, or if there is any
// other error related to starting the temporary container.
//
// There are five different ways to configure the hook. As an example, all forms
// below are equivalent and will execute `rake test --verbose`.
//
// 1. Shell script:
//
// BuildPostCommitSpec{
// Script: "rake test --verbose",
// }
//
// The above is a convenient form which is equivalent to:
//
// BuildPostCommitSpec{
// Command: []string{"/bin/sh", "-ic"},
// Args: []string{"rake test --verbose"},
// }
//
// 2. Command as the image entrypoint:
//
// BuildPostCommitSpec{
// Command: []string{"rake", "test", "--verbose"},
// }
//
// Command overrides the image entrypoint in the exec form, as documented in
// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
//
// 3. Pass arguments to the default entrypoint:
//
// BuildPostCommitSpec{
// Args: []string{"rake", "test", "--verbose"},
// }
//
// This form is only useful if the image entrypoint can handle arguments.
//
// 4. Shell script with arguments:
//
// BuildPostCommitSpec{
// Script: "rake test $1",
// Args: []string{"--verbose"},
// }
//
// This form is useful if you need to pass arguments that would otherwise be
// hard to quote properly in the shell script. In the script, $0 will be
// "/bin/sh" and $1, $2, etc, are the positional arguments from Args.
//
// 5. Command with arguments:
//
// BuildPostCommitSpec{
// Command: []string{"rake", "test"},
// Args: []string{"--verbose"},
// }
//
// This form is equivalent to appending the arguments to the Command slice.
//
// It is invalid to provide both Script and Command simultaneously. If none of
// the fields are specified, the hook is not executed.
type BuildPostCommitSpec struct {
// Command is the command to run. It may not be specified with Script.
// This might be needed if the image doesn't have `/bin/sh`, or if you
// do not want to use a shell. In all other cases, using Script might be
// more convenient.
Command []string
// Args is a list of arguments that are provided to either Command,
// Script or the Docker image's default entrypoint. The arguments are
// placed immediately after the command to be run.
Args []string
// Script is a shell script to be run with `/bin/sh -ic`. It may not be
// specified with Command. Use Script when a shell script is appropriate
// to execute the post build hook, for example for running unit tests
// with `rake test`. If you need control over the image entrypoint, or
// if the image does not have `/bin/sh`, use Command and/or Args.
// The `-i` flag is needed to support CentOS and RHEL images that use
// Software Collections (SCL), in order to have the appropriate
// collections enabled in the shell. E.g., in the Ruby image, this is
// necessary to make `ruby`, `bundle` and other binaries available in
// the PATH.
Script string
}
// BuildOutput is input to a build strategy and describes the Docker image that the strategy
// should produce.
type BuildOutput struct {
	// To defines an optional location to push the output of this build to.
	// Kind must be one of 'ImageStreamTag' or 'DockerImage'.
	// This value will be used to look up a Docker image repository to push to.
	// In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
	// the build unless Namespace is specified.
	To *kapi.ObjectReference
	// PushSecret is the name of a Secret that is used to set up the
	// authentication for executing the Docker push to an authentication
	// enabled Docker registry (or Docker Hub). It is optional.
	PushSecret *kapi.LocalObjectReference
}
// BuildConfig is a template which can be used to create new builds.
type BuildConfig struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// Standard object metadata (name, namespace, labels, annotations, ...).
	kapi.ObjectMeta
	// Spec holds all the input necessary to produce a new build, and the conditions when
	// to trigger them.
	Spec BuildConfigSpec
	// Status holds any relevant information about a build config
	Status BuildConfigStatus
}
// BuildConfigSpec describes when and how builds are created
type BuildConfigSpec struct {
	// Triggers determine how new Builds can be launched from a BuildConfig. If
	// no triggers are defined, a new build can only occur as a result of an
	// explicit client build creation.
	Triggers []BuildTriggerPolicy
	// RunPolicy describes how the new build created from this build
	// configuration will be scheduled for execution.
	// This is optional, if not specified we default to "Serial".
	RunPolicy BuildRunPolicy
	// CommonSpec is the desired build specification, embedded so its fields
	// apply directly to builds created from this config.
	CommonSpec
}
// BuildRunPolicy defines the behaviour of how the new builds are executed
// from the existing build configuration. Valid values are declared by the
// BuildRunPolicy* constants below.
type BuildRunPolicy string
const (
	// BuildRunPolicyParallel schedules new builds immediately after they are
	// created. Builds will be executed in parallel.
	BuildRunPolicyParallel BuildRunPolicy = "Parallel"
	// BuildRunPolicySerial schedules new builds to execute in a sequence as
	// they are created. Every build gets queued up and will execute when the
	// previous build completes. This is the default policy.
	BuildRunPolicySerial BuildRunPolicy = "Serial"
	// BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
	// cancelling all the previously queued builds.
	BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
)
// BuildConfigStatus contains current state of the build config object.
type BuildConfigStatus struct {
	// LastVersion is the version number of the most recently triggered
	// build for this build config.
	LastVersion int64
}
// WebHookTrigger is a trigger that gets invoked using a webhook type of post
type WebHookTrigger struct {
	// Secret is used to validate incoming webhook requests.
	Secret string
	// AllowEnv determines whether the webhook can set environment variables; can only
	// be set to true for GenericWebHook
	AllowEnv bool
}
// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
type ImageChangeTrigger struct {
	// LastTriggeredImageID is used internally by the ImageChangeController to save the last
	// used image ID for a build.
	LastTriggeredImageID string
	// From is a reference to an ImageStreamTag that will trigger a build when updated
	// It is optional. If no From is specified, the From image from the build strategy
	// will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
	// a build configuration.
	From *kapi.ObjectReference
}
// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
type BuildTriggerPolicy struct {
	// Type is the type of build trigger
	Type BuildTriggerType
	// GitHubWebHook contains the parameters for a GitHub webhook type of trigger
	GitHubWebHook *WebHookTrigger
	// GenericWebHook contains the parameters for a Generic webhook type of trigger
	GenericWebHook *WebHookTrigger
	// ImageChange contains parameters for an ImageChange type of trigger
	ImageChange *ImageChangeTrigger
	// NOTE(review): presumably only the field matching Type is populated —
	// confirm against the validation code.
}
// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
// Recognized values are collected in KnownTriggerTypes below.
type BuildTriggerType string
// KnownTriggerTypes is the set of recognized build trigger type names.
// NOTE: Adding a new trigger type requires adding the type to KnownTriggerTypes.
// NOTE(review): the deprecated lowercase variants declared below are not part
// of this set — confirm that is intentional before using it for validation.
var KnownTriggerTypes = sets.NewString(
	string(GitHubWebHookBuildTriggerType),
	string(GenericWebHookBuildTriggerType),
	string(ImageChangeBuildTriggerType),
	string(ConfigChangeBuildTriggerType),
)
const (
	// GitHubWebHookBuildTriggerType represents a trigger that launches builds on
	// GitHub webhook invocations
	GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
	// GitHubWebHookBuildTriggerTypeDeprecated is the legacy lowercase spelling of
	// GitHubWebHookBuildTriggerType.
	GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
	// GenericWebHookBuildTriggerType represents a trigger that launches builds on
	// generic webhook invocations
	GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
	// GenericWebHookBuildTriggerTypeDeprecated is the legacy lowercase spelling of
	// GenericWebHookBuildTriggerType.
	GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
	// ImageChangeBuildTriggerType represents a trigger that launches builds on
	// availability of a new version of an image
	ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
	// ImageChangeBuildTriggerTypeDeprecated is the legacy camelCase spelling of
	// ImageChangeBuildTriggerType.
	ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
	// ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
	// WARNING: In the future the behavior will change to trigger a build on any config change
	ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
)
// BuildList is a collection of Builds.
type BuildList struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// Standard list metadata.
	unversioned.ListMeta
	// Items is a list of builds
	Items []Build
}
// BuildConfigList is a collection of BuildConfigs.
type BuildConfigList struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// Standard list metadata.
	unversioned.ListMeta
	// Items is a list of build configs
	Items []BuildConfig
}
// GenericWebHookEvent is the payload expected for a generic webhook post
type GenericWebHookEvent struct {
	// Git is the git information, if any. Optional.
	Git *GitInfo
	// Env contains additional environment variables you want to pass into a builder container
	Env []kapi.EnvVar
}
// GitInfo is the aggregated git information for a generic webhook post
type GitInfo struct {
	// Embedded git source information.
	GitBuildSource
	// Embedded git revision information.
	GitSourceRevision
	// Refs is a list of GitRefs for the provided repo - generally sent
	// when used from a post-receive hook. This field is optional and is
	// used when sending multiple refs
	Refs []GitRefInfo
}
// GitRefInfo is a single ref
type GitRefInfo struct {
	// Embedded git source information for the ref.
	GitBuildSource
	// Embedded git revision information for the ref.
	GitSourceRevision
}
// BuildLog is the (unused) resource associated with the build log redirector
type BuildLog struct {
	// Standard type metadata.
	unversioned.TypeMeta
}
// BuildRequest is the resource used to pass parameters to build generator
type BuildRequest struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// TODO: build request should allow name generation via Name and GenerateName, build config
	// name should be provided as a separate field
	kapi.ObjectMeta
	// Revision is the information from the source for a specific repo snapshot.
	Revision *SourceRevision
	// TriggeredByImage is the Image that triggered this build.
	TriggeredByImage *kapi.ObjectReference
	// From is the reference to the ImageStreamTag that triggered the build.
	From *kapi.ObjectReference
	// Binary indicates a request to build from a binary provided to the builder
	Binary *BinaryBuildSource
	// LastVersion (optional) is the LastVersion of the BuildConfig that was used
	// to generate the build. If the BuildConfig in the generator doesn't match,
	// a build will not be generated.
	LastVersion *int64
	// Env contains additional environment variables you want to pass into a builder container.
	Env []kapi.EnvVar
	// TriggeredBy describes which triggers started the most recent update to the
	// buildconfig and contains information about those triggers.
	TriggeredBy []BuildTriggerCause
}
// BinaryBuildRequestOptions are the options that accompany a binary build
// request.
type BinaryBuildRequestOptions struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// Standard object metadata.
	kapi.ObjectMeta
	// AsFile is presumably the file name under which the uploaded binary
	// content should be made available to the build — TODO confirm with the
	// binary build handler.
	AsFile string
	// TODO: support structs in query arguments in the future (inline and nested fields)
	// Commit is the value identifying a specific commit
	Commit string
	// Message is the description of a specific commit
	Message string
	// AuthorName of the source control user
	AuthorName string
	// AuthorEmail of the source control user
	AuthorEmail string
	// CommitterName of the source control user
	CommitterName string
	// CommitterEmail of the source control user
	CommitterEmail string
}
// BuildLogOptions is the REST options for a build log
// NOTE(review): most fields mirror pod log options — confirm which ones the
// build log endpoint actually honors.
type BuildLogOptions struct {
	// Standard type metadata.
	unversioned.TypeMeta
	// Container for which to return logs
	Container string
	// Follow if true indicates that the build log should be streamed until
	// the build terminates.
	Follow bool
	// If true, return previous build logs.
	Previous bool
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	SinceSeconds *int64
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	SinceTime *unversioned.Time
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output.
	Timestamps bool
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime
	TailLines *int64
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	LimitBytes *int64
	// NoWait if true causes the call to return immediately even if the build
	// is not available yet. Otherwise the server will wait until the build has started.
	NoWait bool
	// Version of the build for which to view logs.
	Version *int64
}
// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
type SecretSpec struct {
	// SecretSource is a reference to the secret
	SecretSource kapi.LocalObjectReference
	// MountPath is the path at which to mount the secret
	MountPath string
}

View File

@ -0,0 +1,68 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/validation"
"github.com/openshift/origin/pkg/util/namer"
)
const (
	// BuildPodSuffix is the suffix appended to a build name to form the name
	// of the pod that executes the build (see GetBuildPodName).
	BuildPodSuffix = "build"
)
// GetBuildPodName returns name of the build pod.
// The name is derived from the build name plus the "build" suffix via
// namer.GetPodName.
func GetBuildPodName(build *Build) string {
	return namer.GetPodName(build.Name, BuildPodSuffix)
}
// StrategyType returns a short human-readable name ("Docker", "Custom",
// "Source" or "JenkinsPipeline") for the strategy configured on the given
// BuildStrategy, or the empty string when no known strategy is set.
func StrategyType(strategy BuildStrategy) string {
	if strategy.DockerStrategy != nil {
		return "Docker"
	}
	if strategy.CustomStrategy != nil {
		return "Custom"
	}
	if strategy.SourceStrategy != nil {
		return "Source"
	}
	if strategy.JenkinsPipelineStrategy != nil {
		return "JenkinsPipeline"
	}
	return ""
}
// SourceType returns a comma-separated list naming the source inputs that
// are present on the given BuildSource, always in the order Git, Dockerfile,
// Binary. It returns the empty string when none of them are set.
func SourceType(source BuildSource) string {
	result := ""
	// add appends a kind name, separating entries with a comma.
	add := func(kind string) {
		if len(result) != 0 {
			result += ","
		}
		result += kind
	}
	if source.Git != nil {
		add("Git")
	}
	if source.Dockerfile != nil {
		add("Dockerfile")
	}
	if source.Binary != nil {
		add("Binary")
	}
	return result
}
// LabelValue returns a string to use as a value for the Build
// label in a pod. If the length of the string parameter exceeds
// the maximum label length, the value will be truncated.
func LabelValue(name string) string {
	if len(name) > validation.DNS1123LabelMaxLength {
		return name[:validation.DNS1123LabelMaxLength]
	}
	return name
}
// GetBuildName returns the name of a Build associated with the
// given Pod, as recorded in the pod's BuildAnnotation annotation.
// It returns the empty string when the annotation is not present.
func GetBuildName(pod *kapi.Pod) string {
	return pod.Annotations[BuildAnnotation]
}

View File

@ -0,0 +1,152 @@
package v1
import (
"fmt"
"math"
"reflect"
"strings"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
oapi "github.com/openshift/origin/pkg/api"
newer "github.com/openshift/origin/pkg/deploy/api"
imageapi "github.com/openshift/origin/pkg/image/api"
)
// Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams
// converts the versioned trigger params to the internal representation. The
// legacy "ImageStream"/"ImageRepository" kinds are normalized to
// "ImageStreamTag", defaulting the tag when the name carries none.
func Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams(in *DeploymentTriggerImageChangeParams, out *newer.DeploymentTriggerImageChangeParams, s conversion.Scope) error {
	// Apply any registered defaulting function for this type before converting.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*DeploymentTriggerImageChangeParams))(in)
	}
	// Copy all identically named and typed fields first.
	if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil {
		return err
	}
	switch in.From.Kind {
	case "ImageStreamTag":
		// Already in canonical form; nothing to do.
	case "ImageStream", "ImageRepository":
		out.From.Kind = "ImageStreamTag"
		if !strings.Contains(out.From.Name, ":") {
			out.From.Name = imageapi.JoinImageStreamTag(out.From.Name, imageapi.DefaultImageTag)
		}
	default:
		// Will be handled by validation
	}
	return nil
}
// Convert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams
// converts the internal trigger params to the versioned representation,
// applying the same "ImageStream"/"ImageRepository" -> "ImageStreamTag"
// normalization and tag defaulting as the reverse direction.
func Convert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams(in *newer.DeploymentTriggerImageChangeParams, out *DeploymentTriggerImageChangeParams, s conversion.Scope) error {
	// Copy all identically named and typed fields first.
	if err := s.DefaultConvert(in, out, conversion.IgnoreMissingFields); err != nil {
		return err
	}
	switch in.From.Kind {
	case "ImageStreamTag":
		// Already in canonical form; nothing to do.
	case "ImageStream", "ImageRepository":
		out.From.Kind = "ImageStreamTag"
		if !strings.Contains(out.From.Name, ":") {
			out.From.Name = imageapi.JoinImageStreamTag(out.From.Name, imageapi.DefaultImageTag)
		}
	default:
		// Will be handled by validation
	}
	return nil
}
// Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams
// converts versioned rolling strategy params to the internal form. When the
// deprecated UpdatePercent field is set it takes precedence and is mapped to
// MaxSurge (positive values) or MaxUnavailable (negative values) as a
// percentage string.
func Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams(in *RollingDeploymentStrategyParams, out *newer.RollingDeploymentStrategyParams, s conversion.Scope) error {
	// Apply any registered defaulting function for this type first.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*RollingDeploymentStrategyParams))(in)
	}
	out.UpdatePeriodSeconds = in.UpdatePeriodSeconds
	out.IntervalSeconds = in.IntervalSeconds
	out.TimeoutSeconds = in.TimeoutSeconds
	out.UpdatePercent = in.UpdatePercent
	if in.Pre != nil {
		if err := s.Convert(&in.Pre, &out.Pre, 0); err != nil {
			return err
		}
	}
	if in.Post != nil {
		if err := s.Convert(&in.Post, &out.Post, 0); err != nil {
			return err
		}
	}
	if in.UpdatePercent != nil {
		// Translate the deprecated percentage: the sign selects surge vs.
		// unavailable, only the magnitude is kept.
		pct := intstr.FromString(fmt.Sprintf("%d%%", int(math.Abs(float64(*in.UpdatePercent)))))
		if *in.UpdatePercent > 0 {
			out.MaxSurge = pct
		} else {
			out.MaxUnavailable = pct
		}
	} else {
		if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
			return err
		}
		if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil {
			return err
		}
	}
	return nil
}
// Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategyParams
// converts internal rolling strategy params to the versioned form. When the
// deprecated UpdatePercent field is set it takes precedence and is mapped to
// MaxSurge (positive values) or MaxUnavailable (negative values).
func Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategyParams(in *newer.RollingDeploymentStrategyParams, out *RollingDeploymentStrategyParams, s conversion.Scope) error {
	out.UpdatePeriodSeconds = in.UpdatePeriodSeconds
	out.IntervalSeconds = in.IntervalSeconds
	out.TimeoutSeconds = in.TimeoutSeconds
	out.UpdatePercent = in.UpdatePercent
	if in.Pre != nil {
		if err := s.Convert(&in.Pre, &out.Pre, 0); err != nil {
			return err
		}
	}
	if in.Post != nil {
		if err := s.Convert(&in.Post, &out.Post, 0); err != nil {
			return err
		}
	}
	// The versioned fields are pointers; allocate them before converting
	// into them so s.Convert has a valid destination.
	if out.MaxUnavailable == nil {
		out.MaxUnavailable = &intstr.IntOrString{}
	}
	if out.MaxSurge == nil {
		out.MaxSurge = &intstr.IntOrString{}
	}
	if in.UpdatePercent != nil {
		// Sign selects surge vs. unavailable; only the magnitude is kept.
		pct := intstr.FromString(fmt.Sprintf("%d%%", int(math.Abs(float64(*in.UpdatePercent)))))
		if *in.UpdatePercent > 0 {
			out.MaxSurge = &pct
		} else {
			out.MaxUnavailable = &pct
		}
	} else {
		if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil {
			return err
		}
		if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil {
			return err
		}
	}
	return nil
}
// addConversionFuncs registers the hand-written conversion functions above
// and the DeploymentConfig field label conversion with the given scheme.
// Registration failure indicates a programmer error, so it panics.
func addConversionFuncs(scheme *runtime.Scheme) {
	if err := scheme.AddConversionFuncs(
		Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams,
		Convert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams,
		Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams,
		Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategyParams,
	); err != nil {
		panic(err)
	}
	if err := scheme.AddFieldLabelConversionFunc("v1", "DeploymentConfig",
		oapi.GetFieldLabelConversionFunc(newer.DeploymentConfigToSelectableFields(&newer.DeploymentConfig{}), nil),
	); err != nil {
		panic(err)
	}
}

View File

@ -0,0 +1,878 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by conversion-gen. Do not edit it manually!
package v1
import (
deploy_api "github.com/openshift/origin/pkg/deploy/api"
api "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
conversion "k8s.io/kubernetes/pkg/conversion"
)
// init registers every generated conversion function for the deploy v1 API
// with the global scheme at package load time.
func init() {
	if err := api.Scheme.AddGeneratedConversionFuncs(
		Convert_v1_CustomDeploymentStrategyParams_To_api_CustomDeploymentStrategyParams,
		Convert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams,
		Convert_v1_DeploymentCause_To_api_DeploymentCause,
		Convert_api_DeploymentCause_To_v1_DeploymentCause,
		Convert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger,
		Convert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger,
		Convert_v1_DeploymentConfig_To_api_DeploymentConfig,
		Convert_api_DeploymentConfig_To_v1_DeploymentConfig,
		Convert_v1_DeploymentConfigList_To_api_DeploymentConfigList,
		Convert_api_DeploymentConfigList_To_v1_DeploymentConfigList,
		Convert_v1_DeploymentConfigRollback_To_api_DeploymentConfigRollback,
		Convert_api_DeploymentConfigRollback_To_v1_DeploymentConfigRollback,
		Convert_v1_DeploymentConfigRollbackSpec_To_api_DeploymentConfigRollbackSpec,
		Convert_api_DeploymentConfigRollbackSpec_To_v1_DeploymentConfigRollbackSpec,
		Convert_v1_DeploymentConfigSpec_To_api_DeploymentConfigSpec,
		Convert_api_DeploymentConfigSpec_To_v1_DeploymentConfigSpec,
		Convert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus,
		Convert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus,
		Convert_v1_DeploymentDetails_To_api_DeploymentDetails,
		Convert_api_DeploymentDetails_To_v1_DeploymentDetails,
		Convert_v1_DeploymentLog_To_api_DeploymentLog,
		Convert_api_DeploymentLog_To_v1_DeploymentLog,
		Convert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions,
		Convert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions,
		Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy,
		Convert_api_DeploymentStrategy_To_v1_DeploymentStrategy,
		Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams,
		Convert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams,
		Convert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy,
		Convert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy,
		Convert_v1_ExecNewPodHook_To_api_ExecNewPodHook,
		Convert_api_ExecNewPodHook_To_v1_ExecNewPodHook,
		Convert_v1_LifecycleHook_To_api_LifecycleHook,
		Convert_api_LifecycleHook_To_v1_LifecycleHook,
		Convert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams,
		Convert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams,
		Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams,
		Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategyParams,
		Convert_v1_TagImageHook_To_api_TagImageHook,
		Convert_api_TagImageHook_To_v1_TagImageHook,
	); err != nil {
		// if one of the conversion functions is malformed, detect it immediately.
		panic(err)
	}
}
// The converters below for CustomDeploymentStrategyParams, DeploymentCause
// and DeploymentCauseImageTrigger were emitted by conversion-gen: each
// autoConvert_* performs the field-by-field copy between the versioned (v1)
// and internal (deploy_api) representations, and the matching Convert_*
// wrapper is the public entry point registered in init().
func autoConvert_v1_CustomDeploymentStrategyParams_To_api_CustomDeploymentStrategyParams(in *CustomDeploymentStrategyParams, out *deploy_api.CustomDeploymentStrategyParams, s conversion.Scope) error {
	out.Image = in.Image
	if in.Environment != nil {
		in, out := &in.Environment, &out.Environment
		*out = make([]api.EnvVar, len(*in))
		for i := range *in {
			if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Environment = nil
	}
	out.Command = in.Command
	return nil
}
func Convert_v1_CustomDeploymentStrategyParams_To_api_CustomDeploymentStrategyParams(in *CustomDeploymentStrategyParams, out *deploy_api.CustomDeploymentStrategyParams, s conversion.Scope) error {
	return autoConvert_v1_CustomDeploymentStrategyParams_To_api_CustomDeploymentStrategyParams(in, out, s)
}
func autoConvert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(in *deploy_api.CustomDeploymentStrategyParams, out *CustomDeploymentStrategyParams, s conversion.Scope) error {
	out.Image = in.Image
	if in.Environment != nil {
		in, out := &in.Environment, &out.Environment
		*out = make([]api_v1.EnvVar, len(*in))
		for i := range *in {
			if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Environment = nil
	}
	out.Command = in.Command
	return nil
}
func Convert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(in *deploy_api.CustomDeploymentStrategyParams, out *CustomDeploymentStrategyParams, s conversion.Scope) error {
	return autoConvert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(in, out, s)
}
func autoConvert_v1_DeploymentCause_To_api_DeploymentCause(in *DeploymentCause, out *deploy_api.DeploymentCause, s conversion.Scope) error {
	out.Type = deploy_api.DeploymentTriggerType(in.Type)
	if in.ImageTrigger != nil {
		in, out := &in.ImageTrigger, &out.ImageTrigger
		*out = new(deploy_api.DeploymentCauseImageTrigger)
		if err := Convert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ImageTrigger = nil
	}
	return nil
}
func Convert_v1_DeploymentCause_To_api_DeploymentCause(in *DeploymentCause, out *deploy_api.DeploymentCause, s conversion.Scope) error {
	return autoConvert_v1_DeploymentCause_To_api_DeploymentCause(in, out, s)
}
func autoConvert_api_DeploymentCause_To_v1_DeploymentCause(in *deploy_api.DeploymentCause, out *DeploymentCause, s conversion.Scope) error {
	out.Type = DeploymentTriggerType(in.Type)
	if in.ImageTrigger != nil {
		in, out := &in.ImageTrigger, &out.ImageTrigger
		*out = new(DeploymentCauseImageTrigger)
		if err := Convert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ImageTrigger = nil
	}
	return nil
}
func Convert_api_DeploymentCause_To_v1_DeploymentCause(in *deploy_api.DeploymentCause, out *DeploymentCause, s conversion.Scope) error {
	return autoConvert_api_DeploymentCause_To_v1_DeploymentCause(in, out, s)
}
func autoConvert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger(in *DeploymentCauseImageTrigger, out *deploy_api.DeploymentCauseImageTrigger, s conversion.Scope) error {
	if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	return nil
}
func Convert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger(in *DeploymentCauseImageTrigger, out *deploy_api.DeploymentCauseImageTrigger, s conversion.Scope) error {
	return autoConvert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger(in, out, s)
}
func autoConvert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(in *deploy_api.DeploymentCauseImageTrigger, out *DeploymentCauseImageTrigger, s conversion.Scope) error {
	if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	return nil
}
func Convert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(in *deploy_api.DeploymentCauseImageTrigger, out *DeploymentCauseImageTrigger, s conversion.Scope) error {
	return autoConvert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(in, out, s)
}
// Generated converters for DeploymentConfig and DeploymentConfigList. Note
// that the v1 -> internal DeploymentConfig conversion applies the versioned
// defaulting functions before copying fields.
func autoConvert_v1_DeploymentConfig_To_api_DeploymentConfig(in *DeploymentConfig, out *deploy_api.DeploymentConfig, s conversion.Scope) error {
	// Defaulting runs on the versioned object before conversion.
	SetDefaults_DeploymentConfig(in)
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := api_v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
		return err
	}
	if err := Convert_v1_DeploymentConfigSpec_To_api_DeploymentConfigSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}
func Convert_v1_DeploymentConfig_To_api_DeploymentConfig(in *DeploymentConfig, out *deploy_api.DeploymentConfig, s conversion.Scope) error {
	return autoConvert_v1_DeploymentConfig_To_api_DeploymentConfig(in, out, s)
}
func autoConvert_api_DeploymentConfig_To_v1_DeploymentConfig(in *deploy_api.DeploymentConfig, out *DeploymentConfig, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := api_v1.Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
		return err
	}
	if err := Convert_api_DeploymentConfigSpec_To_v1_DeploymentConfigSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}
func Convert_api_DeploymentConfig_To_v1_DeploymentConfig(in *deploy_api.DeploymentConfig, out *DeploymentConfig, s conversion.Scope) error {
	return autoConvert_api_DeploymentConfig_To_v1_DeploymentConfig(in, out, s)
}
func autoConvert_v1_DeploymentConfigList_To_api_DeploymentConfigList(in *DeploymentConfigList, out *deploy_api.DeploymentConfigList, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]deploy_api.DeploymentConfig, len(*in))
		for i := range *in {
			if err := Convert_v1_DeploymentConfig_To_api_DeploymentConfig(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
func Convert_v1_DeploymentConfigList_To_api_DeploymentConfigList(in *DeploymentConfigList, out *deploy_api.DeploymentConfigList, s conversion.Scope) error {
	return autoConvert_v1_DeploymentConfigList_To_api_DeploymentConfigList(in, out, s)
}
func autoConvert_api_DeploymentConfigList_To_v1_DeploymentConfigList(in *deploy_api.DeploymentConfigList, out *DeploymentConfigList, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DeploymentConfig, len(*in))
		for i := range *in {
			if err := Convert_api_DeploymentConfig_To_v1_DeploymentConfig(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}
func Convert_api_DeploymentConfigList_To_v1_DeploymentConfigList(in *deploy_api.DeploymentConfigList, out *DeploymentConfigList, s conversion.Scope) error {
	return autoConvert_api_DeploymentConfigList_To_v1_DeploymentConfigList(in, out, s)
}
// Generated converters for DeploymentConfigRollback and its nested
// DeploymentConfigRollbackSpec; straight field-by-field copies in both
// directions.
func autoConvert_v1_DeploymentConfigRollback_To_api_DeploymentConfigRollback(in *DeploymentConfigRollback, out *deploy_api.DeploymentConfigRollback, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	out.Name = in.Name
	out.UpdatedAnnotations = in.UpdatedAnnotations
	if err := Convert_v1_DeploymentConfigRollbackSpec_To_api_DeploymentConfigRollbackSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}
func Convert_v1_DeploymentConfigRollback_To_api_DeploymentConfigRollback(in *DeploymentConfigRollback, out *deploy_api.DeploymentConfigRollback, s conversion.Scope) error {
	return autoConvert_v1_DeploymentConfigRollback_To_api_DeploymentConfigRollback(in, out, s)
}
func autoConvert_api_DeploymentConfigRollback_To_v1_DeploymentConfigRollback(in *deploy_api.DeploymentConfigRollback, out *DeploymentConfigRollback, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	out.Name = in.Name
	out.UpdatedAnnotations = in.UpdatedAnnotations
	if err := Convert_api_DeploymentConfigRollbackSpec_To_v1_DeploymentConfigRollbackSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}
func Convert_api_DeploymentConfigRollback_To_v1_DeploymentConfigRollback(in *deploy_api.DeploymentConfigRollback, out *DeploymentConfigRollback, s conversion.Scope) error {
	return autoConvert_api_DeploymentConfigRollback_To_v1_DeploymentConfigRollback(in, out, s)
}
func autoConvert_v1_DeploymentConfigRollbackSpec_To_api_DeploymentConfigRollbackSpec(in *DeploymentConfigRollbackSpec, out *deploy_api.DeploymentConfigRollbackSpec, s conversion.Scope) error {
	if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	out.Revision = in.Revision
	out.IncludeTriggers = in.IncludeTriggers
	out.IncludeTemplate = in.IncludeTemplate
	out.IncludeReplicationMeta = in.IncludeReplicationMeta
	out.IncludeStrategy = in.IncludeStrategy
	return nil
}
func Convert_v1_DeploymentConfigRollbackSpec_To_api_DeploymentConfigRollbackSpec(in *DeploymentConfigRollbackSpec, out *deploy_api.DeploymentConfigRollbackSpec, s conversion.Scope) error {
	return autoConvert_v1_DeploymentConfigRollbackSpec_To_api_DeploymentConfigRollbackSpec(in, out, s)
}
func autoConvert_api_DeploymentConfigRollbackSpec_To_v1_DeploymentConfigRollbackSpec(in *deploy_api.DeploymentConfigRollbackSpec, out *DeploymentConfigRollbackSpec, s conversion.Scope) error {
	if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	out.Revision = in.Revision
	out.IncludeTriggers = in.IncludeTriggers
	out.IncludeTemplate = in.IncludeTemplate
	out.IncludeReplicationMeta = in.IncludeReplicationMeta
	out.IncludeStrategy = in.IncludeStrategy
	return nil
}
func Convert_api_DeploymentConfigRollbackSpec_To_v1_DeploymentConfigRollbackSpec(in *deploy_api.DeploymentConfigRollbackSpec, out *DeploymentConfigRollbackSpec, s conversion.Scope) error {
	return autoConvert_api_DeploymentConfigRollbackSpec_To_v1_DeploymentConfigRollbackSpec(in, out, s)
}
// Generated converters for DeploymentConfigSpec and DeploymentConfigStatus.
// The v1 -> internal spec conversion additionally applies the versioned spec
// defaulting first.
func autoConvert_v1_DeploymentConfigSpec_To_api_DeploymentConfigSpec(in *DeploymentConfigSpec, out *deploy_api.DeploymentConfigSpec, s conversion.Scope) error {
	// Defaulting runs on the versioned spec before conversion.
	SetDefaults_DeploymentConfigSpec(in)
	if err := Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	if in.Triggers != nil {
		in, out := &in.Triggers, &out.Triggers
		*out = make([]deploy_api.DeploymentTriggerPolicy, len(*in))
		for i := range *in {
			if err := Convert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Triggers = nil
	}
	out.Replicas = in.Replicas
	out.RevisionHistoryLimit = in.RevisionHistoryLimit
	out.Test = in.Test
	out.Paused = in.Paused
	out.Selector = in.Selector
	if in.Template != nil {
		in, out := &in.Template, &out.Template
		*out = new(api.PodTemplateSpec)
		if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Template = nil
	}
	return nil
}
func Convert_v1_DeploymentConfigSpec_To_api_DeploymentConfigSpec(in *DeploymentConfigSpec, out *deploy_api.DeploymentConfigSpec, s conversion.Scope) error {
	return autoConvert_v1_DeploymentConfigSpec_To_api_DeploymentConfigSpec(in, out, s)
}
func autoConvert_api_DeploymentConfigSpec_To_v1_DeploymentConfigSpec(in *deploy_api.DeploymentConfigSpec, out *DeploymentConfigSpec, s conversion.Scope) error {
	if err := Convert_api_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	if in.Triggers != nil {
		in, out := &in.Triggers, &out.Triggers
		*out = make([]DeploymentTriggerPolicy, len(*in))
		for i := range *in {
			if err := Convert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Triggers = nil
	}
	out.Replicas = in.Replicas
	out.RevisionHistoryLimit = in.RevisionHistoryLimit
	out.Test = in.Test
	out.Paused = in.Paused
	out.Selector = in.Selector
	if in.Template != nil {
		in, out := &in.Template, &out.Template
		*out = new(api_v1.PodTemplateSpec)
		if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Template = nil
	}
	return nil
}
func Convert_api_DeploymentConfigSpec_To_v1_DeploymentConfigSpec(in *deploy_api.DeploymentConfigSpec, out *DeploymentConfigSpec, s conversion.Scope) error {
	return autoConvert_api_DeploymentConfigSpec_To_v1_DeploymentConfigSpec(in, out, s)
}
func autoConvert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus(in *DeploymentConfigStatus, out *deploy_api.DeploymentConfigStatus, s conversion.Scope) error {
	out.LatestVersion = in.LatestVersion
	out.ObservedGeneration = in.ObservedGeneration
	out.Replicas = in.Replicas
	out.UpdatedReplicas = in.UpdatedReplicas
	out.AvailableReplicas = in.AvailableReplicas
	out.UnavailableReplicas = in.UnavailableReplicas
	if in.Details != nil {
		in, out := &in.Details, &out.Details
		*out = new(deploy_api.DeploymentDetails)
		if err := Convert_v1_DeploymentDetails_To_api_DeploymentDetails(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Details = nil
	}
	return nil
}
func Convert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus(in *DeploymentConfigStatus, out *deploy_api.DeploymentConfigStatus, s conversion.Scope) error {
return autoConvert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus(in, out, s)
}
func autoConvert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus(in *deploy_api.DeploymentConfigStatus, out *DeploymentConfigStatus, s conversion.Scope) error {
out.LatestVersion = in.LatestVersion
out.ObservedGeneration = in.ObservedGeneration
out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas
out.AvailableReplicas = in.AvailableReplicas
out.UnavailableReplicas = in.UnavailableReplicas
if in.Details != nil {
in, out := &in.Details, &out.Details
*out = new(DeploymentDetails)
if err := Convert_api_DeploymentDetails_To_v1_DeploymentDetails(*in, *out, s); err != nil {
return err
}
} else {
out.Details = nil
}
return nil
}
func Convert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus(in *deploy_api.DeploymentConfigStatus, out *DeploymentConfigStatus, s conversion.Scope) error {
return autoConvert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus(in, out, s)
}
// autoConvert_v1_DeploymentDetails_To_api_DeploymentDetails converts the versioned
// deployment details (message plus causes) to the internal representation.
// NOTE(review): conversion-gen style output — do not hand-edit; regenerate instead.
func autoConvert_v1_DeploymentDetails_To_api_DeploymentDetails(in *DeploymentDetails, out *deploy_api.DeploymentDetails, s conversion.Scope) error {
	out.Message = in.Message
	if in.Causes != nil {
		// Shadow in/out with pointers to the slice fields for the element-wise copy.
		in, out := &in.Causes, &out.Causes
		*out = make([]deploy_api.DeploymentCause, len(*in))
		for i := range *in {
			if err := Convert_v1_DeploymentCause_To_api_DeploymentCause(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Causes = nil
	}
	return nil
}

// Convert_v1_DeploymentDetails_To_api_DeploymentDetails is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_v1_DeploymentDetails_To_api_DeploymentDetails(in *DeploymentDetails, out *deploy_api.DeploymentDetails, s conversion.Scope) error {
	return autoConvert_v1_DeploymentDetails_To_api_DeploymentDetails(in, out, s)
}

// autoConvert_api_DeploymentDetails_To_v1_DeploymentDetails converts the internal
// deployment details to the versioned representation.
func autoConvert_api_DeploymentDetails_To_v1_DeploymentDetails(in *deploy_api.DeploymentDetails, out *DeploymentDetails, s conversion.Scope) error {
	out.Message = in.Message
	if in.Causes != nil {
		in, out := &in.Causes, &out.Causes
		*out = make([]DeploymentCause, len(*in))
		for i := range *in {
			if err := Convert_api_DeploymentCause_To_v1_DeploymentCause(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Causes = nil
	}
	return nil
}

// Convert_api_DeploymentDetails_To_v1_DeploymentDetails is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_api_DeploymentDetails_To_v1_DeploymentDetails(in *deploy_api.DeploymentDetails, out *DeploymentDetails, s conversion.Scope) error {
	return autoConvert_api_DeploymentDetails_To_v1_DeploymentDetails(in, out, s)
}

// autoConvert_v1_DeploymentLog_To_api_DeploymentLog converts DeploymentLog, which only
// carries TypeMeta.
func autoConvert_v1_DeploymentLog_To_api_DeploymentLog(in *DeploymentLog, out *deploy_api.DeploymentLog, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1_DeploymentLog_To_api_DeploymentLog is the public entry point; it delegates
// to the autogenerated converter above.
func Convert_v1_DeploymentLog_To_api_DeploymentLog(in *DeploymentLog, out *deploy_api.DeploymentLog, s conversion.Scope) error {
	return autoConvert_v1_DeploymentLog_To_api_DeploymentLog(in, out, s)
}

// autoConvert_api_DeploymentLog_To_v1_DeploymentLog converts DeploymentLog in the
// internal→versioned direction.
func autoConvert_api_DeploymentLog_To_v1_DeploymentLog(in *deploy_api.DeploymentLog, out *DeploymentLog, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	return nil
}

// Convert_api_DeploymentLog_To_v1_DeploymentLog is the public entry point; it delegates
// to the autogenerated converter above.
func Convert_api_DeploymentLog_To_v1_DeploymentLog(in *deploy_api.DeploymentLog, out *DeploymentLog, s conversion.Scope) error {
	return autoConvert_api_DeploymentLog_To_v1_DeploymentLog(in, out, s)
}

// autoConvert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions converts the versioned
// log options to the internal type. Pointer fields (SinceSeconds, SinceTime, TailLines,
// LimitBytes, Version) are copied by reference, not deep-copied.
func autoConvert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions(in *DeploymentLogOptions, out *deploy_api.DeploymentLogOptions, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	out.Container = in.Container
	out.Follow = in.Follow
	out.Previous = in.Previous
	out.SinceSeconds = in.SinceSeconds
	out.SinceTime = in.SinceTime
	out.Timestamps = in.Timestamps
	out.TailLines = in.TailLines
	out.LimitBytes = in.LimitBytes
	out.NoWait = in.NoWait
	out.Version = in.Version
	return nil
}

// Convert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions is the public entry point;
// it delegates to the autogenerated converter above.
func Convert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions(in *DeploymentLogOptions, out *deploy_api.DeploymentLogOptions, s conversion.Scope) error {
	return autoConvert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions(in, out, s)
}

// autoConvert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions converts the internal
// log options to the versioned type (pointer fields shared, as above).
func autoConvert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions(in *deploy_api.DeploymentLogOptions, out *DeploymentLogOptions, s conversion.Scope) error {
	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
		return err
	}
	out.Container = in.Container
	out.Follow = in.Follow
	out.Previous = in.Previous
	out.SinceSeconds = in.SinceSeconds
	out.SinceTime = in.SinceTime
	out.Timestamps = in.Timestamps
	out.TailLines = in.TailLines
	out.LimitBytes = in.LimitBytes
	out.NoWait = in.NoWait
	out.Version = in.Version
	return nil
}

// Convert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions is the public entry point;
// it delegates to the autogenerated converter above.
func Convert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions(in *deploy_api.DeploymentLogOptions, out *DeploymentLogOptions, s conversion.Scope) error {
	return autoConvert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions(in, out, s)
}
// autoConvert_v1_DeploymentStrategy_To_api_DeploymentStrategy converts the versioned
// strategy to the internal type: defaulting first (mutates the input), then the Type,
// the three optional per-strategy param structs, resources, labels and annotations.
// NOTE(review): conversion-gen style output — do not hand-edit; regenerate instead.
func autoConvert_v1_DeploymentStrategy_To_api_DeploymentStrategy(in *DeploymentStrategy, out *deploy_api.DeploymentStrategy, s conversion.Scope) error {
	SetDefaults_DeploymentStrategy(in)
	out.Type = deploy_api.DeploymentStrategyType(in.Type)
	if in.CustomParams != nil {
		// Shadow in/out with pointers to the fields being converted.
		in, out := &in.CustomParams, &out.CustomParams
		*out = new(deploy_api.CustomDeploymentStrategyParams)
		if err := Convert_v1_CustomDeploymentStrategyParams_To_api_CustomDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.CustomParams = nil
	}
	if in.RecreateParams != nil {
		in, out := &in.RecreateParams, &out.RecreateParams
		*out = new(deploy_api.RecreateDeploymentStrategyParams)
		if err := Convert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RecreateParams = nil
	}
	if in.RollingParams != nil {
		in, out := &in.RollingParams, &out.RollingParams
		*out = new(deploy_api.RollingDeploymentStrategyParams)
		if err := Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingParams = nil
	}
	if err := api_v1.Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
		return err
	}
	out.Labels = in.Labels
	out.Annotations = in.Annotations
	return nil
}

// Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy(in *DeploymentStrategy, out *deploy_api.DeploymentStrategy, s conversion.Scope) error {
	return autoConvert_v1_DeploymentStrategy_To_api_DeploymentStrategy(in, out, s)
}

// autoConvert_api_DeploymentStrategy_To_v1_DeploymentStrategy converts the internal
// strategy to the versioned type. No defaulting in this direction; the params are
// handled in a different order (Recreate, Rolling, Custom), which is harmless since
// the three branches are independent.
func autoConvert_api_DeploymentStrategy_To_v1_DeploymentStrategy(in *deploy_api.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
	out.Type = DeploymentStrategyType(in.Type)
	if in.RecreateParams != nil {
		in, out := &in.RecreateParams, &out.RecreateParams
		*out = new(RecreateDeploymentStrategyParams)
		if err := Convert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RecreateParams = nil
	}
	if in.RollingParams != nil {
		in, out := &in.RollingParams, &out.RollingParams
		*out = new(RollingDeploymentStrategyParams)
		if err := Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.RollingParams = nil
	}
	if in.CustomParams != nil {
		in, out := &in.CustomParams, &out.CustomParams
		*out = new(CustomDeploymentStrategyParams)
		if err := Convert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.CustomParams = nil
	}
	if err := api_v1.Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
		return err
	}
	out.Labels = in.Labels
	out.Annotations = in.Annotations
	return nil
}

// Convert_api_DeploymentStrategy_To_v1_DeploymentStrategy is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_api_DeploymentStrategy_To_v1_DeploymentStrategy(in *deploy_api.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
	return autoConvert_api_DeploymentStrategy_To_v1_DeploymentStrategy(in, out, s)
}
// autoConvert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams
// converts the versioned image-change trigger params to the internal type. The
// ContainerNames slice is shared, not copied. No public Convert_ wrapper is visible
// here; presumably it is hand-written elsewhere in the package — TODO confirm.
func autoConvert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams(in *DeploymentTriggerImageChangeParams, out *deploy_api.DeploymentTriggerImageChangeParams, s conversion.Scope) error {
	out.Automatic = in.Automatic
	out.ContainerNames = in.ContainerNames
	if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	out.LastTriggeredImage = in.LastTriggeredImage
	return nil
}

// autoConvert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams
// is the internal→versioned counterpart of the function above.
func autoConvert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams(in *deploy_api.DeploymentTriggerImageChangeParams, out *DeploymentTriggerImageChangeParams, s conversion.Scope) error {
	out.Automatic = in.Automatic
	out.ContainerNames = in.ContainerNames
	if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil {
		return err
	}
	out.LastTriggeredImage = in.LastTriggeredImage
	return nil
}

// autoConvert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy converts the
// versioned trigger policy (type plus optional image-change params) to the internal type.
func autoConvert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy(in *DeploymentTriggerPolicy, out *deploy_api.DeploymentTriggerPolicy, s conversion.Scope) error {
	out.Type = deploy_api.DeploymentTriggerType(in.Type)
	if in.ImageChangeParams != nil {
		in, out := &in.ImageChangeParams, &out.ImageChangeParams
		*out = new(deploy_api.DeploymentTriggerImageChangeParams)
		if err := Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ImageChangeParams = nil
	}
	return nil
}

// Convert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy is the public entry
// point; it delegates to the autogenerated converter above.
func Convert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy(in *DeploymentTriggerPolicy, out *deploy_api.DeploymentTriggerPolicy, s conversion.Scope) error {
	return autoConvert_v1_DeploymentTriggerPolicy_To_api_DeploymentTriggerPolicy(in, out, s)
}

// autoConvert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy converts the
// internal trigger policy to the versioned type.
func autoConvert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy(in *deploy_api.DeploymentTriggerPolicy, out *DeploymentTriggerPolicy, s conversion.Scope) error {
	out.Type = DeploymentTriggerType(in.Type)
	if in.ImageChangeParams != nil {
		in, out := &in.ImageChangeParams, &out.ImageChangeParams
		*out = new(DeploymentTriggerImageChangeParams)
		if err := Convert_api_DeploymentTriggerImageChangeParams_To_v1_DeploymentTriggerImageChangeParams(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ImageChangeParams = nil
	}
	return nil
}

// Convert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy is the public entry
// point; it delegates to the autogenerated converter above.
func Convert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy(in *deploy_api.DeploymentTriggerPolicy, out *DeploymentTriggerPolicy, s conversion.Scope) error {
	return autoConvert_api_DeploymentTriggerPolicy_To_v1_DeploymentTriggerPolicy(in, out, s)
}
// autoConvert_v1_ExecNewPodHook_To_api_ExecNewPodHook converts the versioned exec hook
// to the internal type. Command and Volumes slices are shared; Env is deep-converted.
// NOTE(review): conversion-gen style output — do not hand-edit; regenerate instead.
func autoConvert_v1_ExecNewPodHook_To_api_ExecNewPodHook(in *ExecNewPodHook, out *deploy_api.ExecNewPodHook, s conversion.Scope) error {
	out.Command = in.Command
	if in.Env != nil {
		// Shadow in/out with pointers to the Env slices for the element-wise copy.
		in, out := &in.Env, &out.Env
		*out = make([]api.EnvVar, len(*in))
		for i := range *in {
			if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	out.ContainerName = in.ContainerName
	out.Volumes = in.Volumes
	return nil
}

// Convert_v1_ExecNewPodHook_To_api_ExecNewPodHook is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_v1_ExecNewPodHook_To_api_ExecNewPodHook(in *ExecNewPodHook, out *deploy_api.ExecNewPodHook, s conversion.Scope) error {
	return autoConvert_v1_ExecNewPodHook_To_api_ExecNewPodHook(in, out, s)
}

// autoConvert_api_ExecNewPodHook_To_v1_ExecNewPodHook converts the internal exec hook
// to the versioned type.
func autoConvert_api_ExecNewPodHook_To_v1_ExecNewPodHook(in *deploy_api.ExecNewPodHook, out *ExecNewPodHook, s conversion.Scope) error {
	out.Command = in.Command
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]api_v1.EnvVar, len(*in))
		for i := range *in {
			if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	out.ContainerName = in.ContainerName
	out.Volumes = in.Volumes
	return nil
}

// Convert_api_ExecNewPodHook_To_v1_ExecNewPodHook is the public entry point; it
// delegates to the autogenerated converter above.
func Convert_api_ExecNewPodHook_To_v1_ExecNewPodHook(in *deploy_api.ExecNewPodHook, out *ExecNewPodHook, s conversion.Scope) error {
	return autoConvert_api_ExecNewPodHook_To_v1_ExecNewPodHook(in, out, s)
}

// autoConvert_v1_LifecycleHook_To_api_LifecycleHook converts the versioned lifecycle
// hook (failure policy, optional exec-new-pod hook, optional tag-image hooks) to the
// internal type.
func autoConvert_v1_LifecycleHook_To_api_LifecycleHook(in *LifecycleHook, out *deploy_api.LifecycleHook, s conversion.Scope) error {
	out.FailurePolicy = deploy_api.LifecycleHookFailurePolicy(in.FailurePolicy)
	if in.ExecNewPod != nil {
		in, out := &in.ExecNewPod, &out.ExecNewPod
		*out = new(deploy_api.ExecNewPodHook)
		if err := Convert_v1_ExecNewPodHook_To_api_ExecNewPodHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ExecNewPod = nil
	}
	if in.TagImages != nil {
		in, out := &in.TagImages, &out.TagImages
		*out = make([]deploy_api.TagImageHook, len(*in))
		for i := range *in {
			if err := Convert_v1_TagImageHook_To_api_TagImageHook(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.TagImages = nil
	}
	return nil
}

// Convert_v1_LifecycleHook_To_api_LifecycleHook is the public entry point; it delegates
// to the autogenerated converter above.
func Convert_v1_LifecycleHook_To_api_LifecycleHook(in *LifecycleHook, out *deploy_api.LifecycleHook, s conversion.Scope) error {
	return autoConvert_v1_LifecycleHook_To_api_LifecycleHook(in, out, s)
}

// autoConvert_api_LifecycleHook_To_v1_LifecycleHook converts the internal lifecycle hook
// to the versioned type.
func autoConvert_api_LifecycleHook_To_v1_LifecycleHook(in *deploy_api.LifecycleHook, out *LifecycleHook, s conversion.Scope) error {
	out.FailurePolicy = LifecycleHookFailurePolicy(in.FailurePolicy)
	if in.ExecNewPod != nil {
		in, out := &in.ExecNewPod, &out.ExecNewPod
		*out = new(ExecNewPodHook)
		if err := Convert_api_ExecNewPodHook_To_v1_ExecNewPodHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.ExecNewPod = nil
	}
	if in.TagImages != nil {
		in, out := &in.TagImages, &out.TagImages
		*out = make([]TagImageHook, len(*in))
		for i := range *in {
			if err := Convert_api_TagImageHook_To_v1_TagImageHook(&(*in)[i], &(*out)[i], s); err != nil {
				return err
			}
		}
	} else {
		out.TagImages = nil
	}
	return nil
}

// Convert_api_LifecycleHook_To_v1_LifecycleHook is the public entry point; it delegates
// to the autogenerated converter above.
func Convert_api_LifecycleHook_To_v1_LifecycleHook(in *deploy_api.LifecycleHook, out *LifecycleHook, s conversion.Scope) error {
	return autoConvert_api_LifecycleHook_To_v1_LifecycleHook(in, out, s)
}
// autoConvert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams
// converts the versioned recreate-strategy params (timeout plus optional Pre/Mid/Post
// lifecycle hooks) to the internal type. Defaulting runs first and mutates the input.
// NOTE(review): conversion-gen style output — do not hand-edit; regenerate instead.
func autoConvert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams(in *RecreateDeploymentStrategyParams, out *deploy_api.RecreateDeploymentStrategyParams, s conversion.Scope) error {
	SetDefaults_RecreateDeploymentStrategyParams(in)
	out.TimeoutSeconds = in.TimeoutSeconds
	if in.Pre != nil {
		// Shadow in/out with pointers to the hook fields being converted.
		in, out := &in.Pre, &out.Pre
		*out = new(deploy_api.LifecycleHook)
		if err := Convert_v1_LifecycleHook_To_api_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Pre = nil
	}
	if in.Mid != nil {
		in, out := &in.Mid, &out.Mid
		*out = new(deploy_api.LifecycleHook)
		if err := Convert_v1_LifecycleHook_To_api_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Mid = nil
	}
	if in.Post != nil {
		in, out := &in.Post, &out.Post
		*out = new(deploy_api.LifecycleHook)
		if err := Convert_v1_LifecycleHook_To_api_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Post = nil
	}
	return nil
}

// Convert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams is
// the public entry point; it delegates to the autogenerated converter above.
func Convert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams(in *RecreateDeploymentStrategyParams, out *deploy_api.RecreateDeploymentStrategyParams, s conversion.Scope) error {
	return autoConvert_v1_RecreateDeploymentStrategyParams_To_api_RecreateDeploymentStrategyParams(in, out, s)
}

// autoConvert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams
// converts the internal recreate-strategy params to the versioned type (no defaulting).
func autoConvert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams(in *deploy_api.RecreateDeploymentStrategyParams, out *RecreateDeploymentStrategyParams, s conversion.Scope) error {
	out.TimeoutSeconds = in.TimeoutSeconds
	if in.Pre != nil {
		in, out := &in.Pre, &out.Pre
		*out = new(LifecycleHook)
		if err := Convert_api_LifecycleHook_To_v1_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Pre = nil
	}
	if in.Mid != nil {
		in, out := &in.Mid, &out.Mid
		*out = new(LifecycleHook)
		if err := Convert_api_LifecycleHook_To_v1_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Mid = nil
	}
	if in.Post != nil {
		in, out := &in.Post, &out.Post
		*out = new(LifecycleHook)
		if err := Convert_api_LifecycleHook_To_v1_LifecycleHook(*in, *out, s); err != nil {
			return err
		}
	} else {
		out.Post = nil
	}
	return nil
}

// Convert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams is
// the public entry point; it delegates to the autogenerated converter above.
func Convert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams(in *deploy_api.RecreateDeploymentStrategyParams, out *RecreateDeploymentStrategyParams, s conversion.Scope) error {
	return autoConvert_api_RecreateDeploymentStrategyParams_To_v1_RecreateDeploymentStrategyParams(in, out, s)
}

// autoConvert_v1_TagImageHook_To_api_TagImageHook converts the versioned tag-image hook
// (container name plus target object reference) to the internal type.
func autoConvert_v1_TagImageHook_To_api_TagImageHook(in *TagImageHook, out *deploy_api.TagImageHook, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.To, &out.To, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1_TagImageHook_To_api_TagImageHook is the public entry point; it delegates to
// the autogenerated converter above.
func Convert_v1_TagImageHook_To_api_TagImageHook(in *TagImageHook, out *deploy_api.TagImageHook, s conversion.Scope) error {
	return autoConvert_v1_TagImageHook_To_api_TagImageHook(in, out, s)
}

// autoConvert_api_TagImageHook_To_v1_TagImageHook converts the internal tag-image hook
// to the versioned type.
func autoConvert_api_TagImageHook_To_v1_TagImageHook(in *deploy_api.TagImageHook, out *TagImageHook, s conversion.Scope) error {
	out.ContainerName = in.ContainerName
	if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.To, &out.To, s); err != nil {
		return err
	}
	return nil
}

// Convert_api_TagImageHook_To_v1_TagImageHook is the public entry point; it delegates to
// the autogenerated converter above.
func Convert_api_TagImageHook_To_v1_TagImageHook(in *deploy_api.TagImageHook, out *TagImageHook, s conversion.Scope) error {
	return autoConvert_api_TagImageHook_To_v1_TagImageHook(in, out, s)
}

View File

@ -0,0 +1,544 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package v1
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
conversion "k8s.io/kubernetes/pkg/conversion"
intstr "k8s.io/kubernetes/pkg/util/intstr"
)
// init registers every generated deep-copy function in this file with the global API
// scheme so the runtime can clone these v1 types.
func init() {
	if err := api.Scheme.AddGeneratedDeepCopyFuncs(
		DeepCopy_v1_CustomDeploymentStrategyParams,
		DeepCopy_v1_DeploymentCause,
		DeepCopy_v1_DeploymentCauseImageTrigger,
		DeepCopy_v1_DeploymentConfig,
		DeepCopy_v1_DeploymentConfigList,
		DeepCopy_v1_DeploymentConfigRollback,
		DeepCopy_v1_DeploymentConfigRollbackSpec,
		DeepCopy_v1_DeploymentConfigSpec,
		DeepCopy_v1_DeploymentConfigStatus,
		DeepCopy_v1_DeploymentDetails,
		DeepCopy_v1_DeploymentLog,
		DeepCopy_v1_DeploymentLogOptions,
		DeepCopy_v1_DeploymentStrategy,
		DeepCopy_v1_DeploymentTriggerImageChangeParams,
		DeepCopy_v1_DeploymentTriggerPolicy,
		DeepCopy_v1_ExecNewPodHook,
		DeepCopy_v1_LifecycleHook,
		DeepCopy_v1_RecreateDeploymentStrategyParams,
		DeepCopy_v1_RollingDeploymentStrategyParams,
		DeepCopy_v1_TagImageHook,
	); err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}
// DeepCopy_v1_CustomDeploymentStrategyParams deep-copies a CustomDeploymentStrategyParams
// value into out (fresh Environment and Command slices).
// Autogenerated by deepcopy-gen (see file header) — do not edit manually.
func DeepCopy_v1_CustomDeploymentStrategyParams(in CustomDeploymentStrategyParams, out *CustomDeploymentStrategyParams, c *conversion.Cloner) error {
	out.Image = in.Image
	if in.Environment != nil {
		// Shadow in/out for the element-wise slice copy.
		in, out := in.Environment, &out.Environment
		*out = make([]api_v1.EnvVar, len(in))
		for i := range in {
			if err := api_v1.DeepCopy_v1_EnvVar(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Environment = nil
	}
	if in.Command != nil {
		in, out := in.Command, &out.Command
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Command = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentCause deep-copies a DeploymentCause, allocating a new
// ImageTrigger when present.
func DeepCopy_v1_DeploymentCause(in DeploymentCause, out *DeploymentCause, c *conversion.Cloner) error {
	out.Type = in.Type
	if in.ImageTrigger != nil {
		in, out := in.ImageTrigger, &out.ImageTrigger
		*out = new(DeploymentCauseImageTrigger)
		if err := DeepCopy_v1_DeploymentCauseImageTrigger(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.ImageTrigger = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentCauseImageTrigger deep-copies the trigger's object reference.
func DeepCopy_v1_DeploymentCauseImageTrigger(in DeploymentCauseImageTrigger, out *DeploymentCauseImageTrigger, c *conversion.Cloner) error {
	if err := api_v1.DeepCopy_v1_ObjectReference(in.From, &out.From, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_v1_DeploymentConfig deep-copies a whole DeploymentConfig: type/object meta,
// spec and status.
func DeepCopy_v1_DeploymentConfig(in DeploymentConfig, out *DeploymentConfig, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_v1_DeploymentConfigSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := DeepCopy_v1_DeploymentConfigStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_v1_DeploymentConfigList deep-copies a DeploymentConfigList, cloning every
// item in Items.
func DeepCopy_v1_DeploymentConfigList(in DeploymentConfigList, out *DeploymentConfigList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]DeploymentConfig, len(in))
		for i := range in {
			if err := DeepCopy_v1_DeploymentConfig(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentConfigRollback deep-copies a rollback request, including a fresh
// UpdatedAnnotations map.
func DeepCopy_v1_DeploymentConfigRollback(in DeploymentConfigRollback, out *DeploymentConfigRollback, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	out.Name = in.Name
	if in.UpdatedAnnotations != nil {
		in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations
		*out = make(map[string]string)
		for key, val := range in {
			(*out)[key] = val
		}
	} else {
		out.UpdatedAnnotations = nil
	}
	if err := DeepCopy_v1_DeploymentConfigRollbackSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_v1_DeploymentConfigRollbackSpec deep-copies the rollback spec (object
// reference plus scalar include flags).
func DeepCopy_v1_DeploymentConfigRollbackSpec(in DeploymentConfigRollbackSpec, out *DeploymentConfigRollbackSpec, c *conversion.Cloner) error {
	if err := api_v1.DeepCopy_v1_ObjectReference(in.From, &out.From, c); err != nil {
		return err
	}
	out.Revision = in.Revision
	out.IncludeTriggers = in.IncludeTriggers
	out.IncludeTemplate = in.IncludeTemplate
	out.IncludeReplicationMeta = in.IncludeReplicationMeta
	out.IncludeStrategy = in.IncludeStrategy
	return nil
}
// DeepCopy_v1_DeploymentConfigSpec deep-copies a DeploymentConfigSpec: strategy,
// triggers, scalar fields, pointer fields (RevisionHistoryLimit), the Selector map and
// the optional pod template. Autogenerated by deepcopy-gen — do not edit manually.
func DeepCopy_v1_DeploymentConfigSpec(in DeploymentConfigSpec, out *DeploymentConfigSpec, c *conversion.Cloner) error {
	if err := DeepCopy_v1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
		return err
	}
	out.MinReadySeconds = in.MinReadySeconds
	if in.Triggers != nil {
		// Shadow in/out for the element-wise slice copy.
		in, out := in.Triggers, &out.Triggers
		*out = make([]DeploymentTriggerPolicy, len(in))
		for i := range in {
			if err := DeepCopy_v1_DeploymentTriggerPolicy(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Triggers = nil
	}
	out.Replicas = in.Replicas
	if in.RevisionHistoryLimit != nil {
		// Allocate a fresh int32 so the copy does not alias the original pointer.
		in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit
		*out = new(int32)
		**out = *in
	} else {
		out.RevisionHistoryLimit = nil
	}
	out.Test = in.Test
	out.Paused = in.Paused
	if in.Selector != nil {
		in, out := in.Selector, &out.Selector
		*out = make(map[string]string)
		for key, val := range in {
			(*out)[key] = val
		}
	} else {
		out.Selector = nil
	}
	if in.Template != nil {
		in, out := in.Template, &out.Template
		*out = new(api_v1.PodTemplateSpec)
		if err := api_v1.DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.Template = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentConfigStatus deep-copies a DeploymentConfigStatus, cloning the
// optional Details struct.
func DeepCopy_v1_DeploymentConfigStatus(in DeploymentConfigStatus, out *DeploymentConfigStatus, c *conversion.Cloner) error {
	out.LatestVersion = in.LatestVersion
	out.ObservedGeneration = in.ObservedGeneration
	out.Replicas = in.Replicas
	out.UpdatedReplicas = in.UpdatedReplicas
	out.AvailableReplicas = in.AvailableReplicas
	out.UnavailableReplicas = in.UnavailableReplicas
	if in.Details != nil {
		in, out := in.Details, &out.Details
		*out = new(DeploymentDetails)
		if err := DeepCopy_v1_DeploymentDetails(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.Details = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentDetails deep-copies the details message and its Causes slice.
func DeepCopy_v1_DeploymentDetails(in DeploymentDetails, out *DeploymentDetails, c *conversion.Cloner) error {
	out.Message = in.Message
	if in.Causes != nil {
		in, out := in.Causes, &out.Causes
		*out = make([]DeploymentCause, len(in))
		for i := range in {
			if err := DeepCopy_v1_DeploymentCause(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Causes = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentLog deep-copies DeploymentLog, which only carries TypeMeta.
func DeepCopy_v1_DeploymentLog(in DeploymentLog, out *DeploymentLog, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_v1_DeploymentLogOptions deep-copies log options, allocating fresh values for
// every non-nil pointer field (SinceSeconds, SinceTime, TailLines, LimitBytes, Version).
func DeepCopy_v1_DeploymentLogOptions(in DeploymentLogOptions, out *DeploymentLogOptions, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	out.Container = in.Container
	out.Follow = in.Follow
	out.Previous = in.Previous
	if in.SinceSeconds != nil {
		in, out := in.SinceSeconds, &out.SinceSeconds
		*out = new(int64)
		**out = *in
	} else {
		out.SinceSeconds = nil
	}
	if in.SinceTime != nil {
		in, out := in.SinceTime, &out.SinceTime
		*out = new(unversioned.Time)
		if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.SinceTime = nil
	}
	out.Timestamps = in.Timestamps
	if in.TailLines != nil {
		in, out := in.TailLines, &out.TailLines
		*out = new(int64)
		**out = *in
	} else {
		out.TailLines = nil
	}
	if in.LimitBytes != nil {
		in, out := in.LimitBytes, &out.LimitBytes
		*out = new(int64)
		**out = *in
	} else {
		out.LimitBytes = nil
	}
	out.NoWait = in.NoWait
	if in.Version != nil {
		in, out := in.Version, &out.Version
		*out = new(int64)
		**out = *in
	} else {
		out.Version = nil
	}
	return nil
}
// DeepCopy_v1_DeploymentStrategy deep-copies a DeploymentStrategy: the three optional
// per-strategy param structs, resources, and fresh Labels/Annotations maps.
// Autogenerated by deepcopy-gen — do not edit manually.
func DeepCopy_v1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
	out.Type = in.Type
	if in.CustomParams != nil {
		// Shadow in/out for the pointer-field copy.
		in, out := in.CustomParams, &out.CustomParams
		*out = new(CustomDeploymentStrategyParams)
		if err := DeepCopy_v1_CustomDeploymentStrategyParams(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.CustomParams = nil
	}
	if in.RecreateParams != nil {
		in, out := in.RecreateParams, &out.RecreateParams
		*out = new(RecreateDeploymentStrategyParams)
		if err := DeepCopy_v1_RecreateDeploymentStrategyParams(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.RecreateParams = nil
	}
	if in.RollingParams != nil {
		in, out := in.RollingParams, &out.RollingParams
		*out = new(RollingDeploymentStrategyParams)
		if err := DeepCopy_v1_RollingDeploymentStrategyParams(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.RollingParams = nil
	}
	if err := api_v1.DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
		return err
	}
	if in.Labels != nil {
		in, out := in.Labels, &out.Labels
		*out = make(map[string]string)
		for key, val := range in {
			(*out)[key] = val
		}
	} else {
		out.Labels = nil
	}
	if in.Annotations != nil {
		in, out := in.Annotations, &out.Annotations
		*out = make(map[string]string)
		for key, val := range in {
			(*out)[key] = val
		}
	} else {
		out.Annotations = nil
	}
	return nil
}

// DeepCopy_v1_DeploymentTriggerImageChangeParams deep-copies the image-change trigger
// params, including a fresh ContainerNames slice.
func DeepCopy_v1_DeploymentTriggerImageChangeParams(in DeploymentTriggerImageChangeParams, out *DeploymentTriggerImageChangeParams, c *conversion.Cloner) error {
	out.Automatic = in.Automatic
	if in.ContainerNames != nil {
		in, out := in.ContainerNames, &out.ContainerNames
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.ContainerNames = nil
	}
	if err := api_v1.DeepCopy_v1_ObjectReference(in.From, &out.From, c); err != nil {
		return err
	}
	out.LastTriggeredImage = in.LastTriggeredImage
	return nil
}

// DeepCopy_v1_DeploymentTriggerPolicy deep-copies a trigger policy, cloning the optional
// ImageChangeParams.
func DeepCopy_v1_DeploymentTriggerPolicy(in DeploymentTriggerPolicy, out *DeploymentTriggerPolicy, c *conversion.Cloner) error {
	out.Type = in.Type
	if in.ImageChangeParams != nil {
		in, out := in.ImageChangeParams, &out.ImageChangeParams
		*out = new(DeploymentTriggerImageChangeParams)
		if err := DeepCopy_v1_DeploymentTriggerImageChangeParams(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.ImageChangeParams = nil
	}
	return nil
}

// DeepCopy_v1_ExecNewPodHook deep-copies an exec hook: fresh Command, Env and Volumes
// slices.
func DeepCopy_v1_ExecNewPodHook(in ExecNewPodHook, out *ExecNewPodHook, c *conversion.Cloner) error {
	if in.Command != nil {
		in, out := in.Command, &out.Command
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Command = nil
	}
	if in.Env != nil {
		in, out := in.Env, &out.Env
		*out = make([]api_v1.EnvVar, len(in))
		for i := range in {
			if err := api_v1.DeepCopy_v1_EnvVar(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	out.ContainerName = in.ContainerName
	if in.Volumes != nil {
		in, out := in.Volumes, &out.Volumes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Volumes = nil
	}
	return nil
}

// DeepCopy_v1_LifecycleHook deep-copies a lifecycle hook: failure policy, optional
// exec-new-pod hook, and the TagImages slice.
func DeepCopy_v1_LifecycleHook(in LifecycleHook, out *LifecycleHook, c *conversion.Cloner) error {
	out.FailurePolicy = in.FailurePolicy
	if in.ExecNewPod != nil {
		in, out := in.ExecNewPod, &out.ExecNewPod
		*out = new(ExecNewPodHook)
		if err := DeepCopy_v1_ExecNewPodHook(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.ExecNewPod = nil
	}
	if in.TagImages != nil {
		in, out := in.TagImages, &out.TagImages
		*out = make([]TagImageHook, len(in))
		for i := range in {
			if err := DeepCopy_v1_TagImageHook(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.TagImages = nil
	}
	return nil
}
func DeepCopy_v1_RecreateDeploymentStrategyParams(in RecreateDeploymentStrategyParams, out *RecreateDeploymentStrategyParams, c *conversion.Cloner) error {
if in.TimeoutSeconds != nil {
in, out := in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int64)
**out = *in
} else {
out.TimeoutSeconds = nil
}
if in.Pre != nil {
in, out := in.Pre, &out.Pre
*out = new(LifecycleHook)
if err := DeepCopy_v1_LifecycleHook(*in, *out, c); err != nil {
return err
}
} else {
out.Pre = nil
}
if in.Mid != nil {
in, out := in.Mid, &out.Mid
*out = new(LifecycleHook)
if err := DeepCopy_v1_LifecycleHook(*in, *out, c); err != nil {
return err
}
} else {
out.Mid = nil
}
if in.Post != nil {
in, out := in.Post, &out.Post
*out = new(LifecycleHook)
if err := DeepCopy_v1_LifecycleHook(*in, *out, c); err != nil {
return err
}
} else {
out.Post = nil
}
return nil
}
// DeepCopy_v1_RollingDeploymentStrategyParams copies in into out, cloning
// every optional pointer field: the three timing knobs, the surge and
// unavailability bounds, the deprecated UpdatePercent, and the Pre/Post
// lifecycle hooks. The first copy error encountered is returned.
func DeepCopy_v1_RollingDeploymentStrategyParams(in RollingDeploymentStrategyParams, out *RollingDeploymentStrategyParams, c *conversion.Cloner) error {
	// copyInt64 clones an optional *int64 (nil stays nil).
	copyInt64 := func(src *int64) *int64 {
		if src == nil {
			return nil
		}
		v := *src
		return &v
	}
	out.UpdatePeriodSeconds = copyInt64(in.UpdatePeriodSeconds)
	out.IntervalSeconds = copyInt64(in.IntervalSeconds)
	out.TimeoutSeconds = copyInt64(in.TimeoutSeconds)
	out.MaxUnavailable = nil
	if in.MaxUnavailable != nil {
		out.MaxUnavailable = new(intstr.IntOrString)
		if err := intstr.DeepCopy_intstr_IntOrString(*in.MaxUnavailable, out.MaxUnavailable, c); err != nil {
			return err
		}
	}
	out.MaxSurge = nil
	if in.MaxSurge != nil {
		out.MaxSurge = new(intstr.IntOrString)
		if err := intstr.DeepCopy_intstr_IntOrString(*in.MaxSurge, out.MaxSurge, c); err != nil {
			return err
		}
	}
	out.UpdatePercent = nil
	if in.UpdatePercent != nil {
		p := *in.UpdatePercent
		out.UpdatePercent = &p
	}
	out.Pre = nil
	if in.Pre != nil {
		out.Pre = new(LifecycleHook)
		if err := DeepCopy_v1_LifecycleHook(*in.Pre, out.Pre, c); err != nil {
			return err
		}
	}
	out.Post = nil
	if in.Post != nil {
		out.Post = new(LifecycleHook)
		if err := DeepCopy_v1_LifecycleHook(*in.Post, out.Post, c); err != nil {
			return err
		}
	}
	return nil
}
// DeepCopy_v1_TagImageHook copies in into out, delegating the To field to
// the generated ObjectReference deep copy and returning its error, if any.
func DeepCopy_v1_TagImageHook(in TagImageHook, out *TagImageHook, c *conversion.Cloner) error {
	out.ContainerName = in.ContainerName
	return api_v1.DeepCopy_v1_ObjectReference(in.To, &out.To, c)
}

View File

@ -0,0 +1,127 @@
package v1
import (
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
deployapi "github.com/openshift/origin/pkg/deploy/api"
)
// Keep this in sync with pkg/api/serialization_test.go#defaultHookContainerName
//
// defaultHookContainerName fills in containerName on every TagImages entry
// and on the ExecNewPod hook whenever the hook left its container name
// empty. A nil hook is a no-op.
func defaultHookContainerName(hook *LifecycleHook, containerName string) {
	if hook == nil {
		return
	}
	for i := range hook.TagImages {
		tag := &hook.TagImages[i]
		if tag.ContainerName == "" {
			tag.ContainerName = containerName
		}
	}
	if exec := hook.ExecNewPod; exec != nil && exec.ContainerName == "" {
		exec.ContainerName = containerName
	}
}
// SetDefaults_DeploymentConfigSpec applies spec-level defaults: a config
// change trigger when no triggers are set, the template's labels as the
// selector when none is given, and — when the template has exactly one
// container — that container's name on the strategy's lifecycle hooks.
func SetDefaults_DeploymentConfigSpec(obj *DeploymentConfigSpec) {
	if obj.Triggers == nil {
		obj.Triggers = []DeploymentTriggerPolicy{{Type: DeploymentTriggerOnConfigChange}}
	}
	if len(obj.Selector) == 0 && obj.Template != nil {
		obj.Selector = obj.Template.Labels
	}
	// Hooks may omit the container name only when it is unambiguous,
	// i.e. the pod template declares a single container.
	if obj.Template == nil || len(obj.Template.Spec.Containers) != 1 {
		return
	}
	name := obj.Template.Spec.Containers[0].Name
	if p := obj.Strategy.RecreateParams; p != nil {
		defaultHookContainerName(p.Pre, name)
		defaultHookContainerName(p.Mid, name)
		defaultHookContainerName(p.Post, name)
	}
	if p := obj.Strategy.RollingParams; p != nil {
		defaultHookContainerName(p.Pre, name)
		defaultHookContainerName(p.Post, name)
	}
}
// SetDefaults_DeploymentStrategy defaults the strategy type to Rolling and
// fills in the params struct that matches the (possibly just defaulted)
// type when it was left unset. Other strategy types are left untouched.
func SetDefaults_DeploymentStrategy(obj *DeploymentStrategy) {
	if obj.Type == "" {
		obj.Type = DeploymentStrategyTypeRolling
	}
	switch obj.Type {
	case DeploymentStrategyTypeRolling:
		if obj.RollingParams == nil {
			obj.RollingParams = &RollingDeploymentStrategyParams{
				IntervalSeconds:     mkintp(deployapi.DefaultRollingIntervalSeconds),
				UpdatePeriodSeconds: mkintp(deployapi.DefaultRollingUpdatePeriodSeconds),
				TimeoutSeconds:      mkintp(deployapi.DefaultRollingTimeoutSeconds),
			}
		}
	case DeploymentStrategyTypeRecreate:
		if obj.RecreateParams == nil {
			obj.RecreateParams = &RecreateDeploymentStrategyParams{}
		}
	}
}
// SetDefaults_RecreateDeploymentStrategyParams fills in TimeoutSeconds when
// unset. Note that the rolling strategy's timeout default is deliberately
// reused here.
func SetDefaults_RecreateDeploymentStrategyParams(obj *RecreateDeploymentStrategyParams) {
	if obj.TimeoutSeconds != nil {
		return
	}
	obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds)
}
// SetDefaults_RollingDeploymentStrategyParams fills in the timing defaults
// and, unless the deprecated UpdatePercent field is in use, a 25%
// MaxUnavailable and MaxSurge.
func SetDefaults_RollingDeploymentStrategyParams(obj *RollingDeploymentStrategyParams) {
	if obj.IntervalSeconds == nil {
		obj.IntervalSeconds = mkintp(deployapi.DefaultRollingIntervalSeconds)
	}
	if obj.UpdatePeriodSeconds == nil {
		obj.UpdatePeriodSeconds = mkintp(deployapi.DefaultRollingUpdatePeriodSeconds)
	}
	if obj.TimeoutSeconds == nil {
		obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds)
	}
	// UpdatePercent is deprecated; when it is set, leave MaxUnavailable and
	// MaxSurge alone so the legacy behavior wins.
	if obj.UpdatePercent != nil {
		return
	}
	if obj.MaxUnavailable == nil {
		v := intstr.FromString("25%")
		obj.MaxUnavailable = &v
	}
	if obj.MaxSurge == nil {
		v := intstr.FromString("25%")
		obj.MaxSurge = &v
	}
}
// SetDefaults_DeploymentConfig normalizes every image-change trigger: the
// From reference's kind is forced to ImageStreamTag and an empty namespace
// inherits the config's own namespace. ImageChangeParams is a pointer, so
// mutations reach the stored trigger even through the loop copy.
func SetDefaults_DeploymentConfig(obj *DeploymentConfig) {
	for i := range obj.Spec.Triggers {
		params := obj.Spec.Triggers[i].ImageChangeParams
		if params == nil {
			continue
		}
		// Default unconditionally for transforming old data.
		params.From.Kind = "ImageStreamTag"
		if params.From.Namespace == "" {
			params.From.Namespace = obj.Namespace
		}
	}
}
// mkintp returns a pointer to a fresh int64 holding i.
func mkintp(i int64) *int64 {
	v := i
	return &v
}
// addDefaultingFuncs registers every SetDefaults_* function in this package
// with the scheme; a registration failure is a programmer error and panics.
func addDefaultingFuncs(scheme *runtime.Scheme) {
	if err := scheme.AddDefaultingFuncs(
		SetDefaults_DeploymentConfigSpec,
		SetDefaults_DeploymentStrategy,
		SetDefaults_RecreateDeploymentStrategyParams,
		SetDefaults_RollingDeploymentStrategyParams,
		SetDefaults_DeploymentConfig,
	); err != nil {
		panic(err)
	}
}

View File

@ -0,0 +1,3 @@
// Package v1 is the v1 version of the API.
// +genconversion=true
package v1

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,401 @@
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
// NOTE(review): generated artifact — change the Go API types and regenerate
// rather than editing field numbers or comments here by hand.
syntax = 'proto2';

package github.com.openshift.origin.pkg.deploy.api.v1;

import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto";
import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto";
import "k8s.io/kubernetes/pkg/runtime/generated.proto";
import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "v1";

// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
message CustomDeploymentStrategyParams {
  // Image specifies a Docker image which can carry out a deployment.
  optional string image = 1;

  // Environment holds the environment which will be given to the container for Image.
  repeated k8s.io.kubernetes.pkg.api.v1.EnvVar environment = 2;

  // Command is optional and overrides CMD in the container Image.
  repeated string command = 3;
}

// DeploymentCause captures information about a particular cause of a deployment.
message DeploymentCause {
  // Type of the trigger that resulted in the creation of a new deployment
  optional string type = 1;

  // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
  optional DeploymentCauseImageTrigger imageTrigger = 2;
}

// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
// from an image change trigger
message DeploymentCauseImageTrigger {
  // From is a reference to the changed object which triggered a deployment. The field may have
  // the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
  optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1;
}

// DeploymentConfig represents a configuration for a single deployment (represented as a
// ReplicationController). It also contains details about changes which resulted in the current
// state of the DeploymentConfig. Each change to the DeploymentConfig which should result in
// a new deployment results in an increment of LatestVersion.
message DeploymentConfig {
  // Standard object's metadata.
  optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1;

  // Spec represents a desired deployment state and how to deploy to it.
  optional DeploymentConfigSpec spec = 2;

  // Status represents the current deployment state.
  optional DeploymentConfigStatus status = 3;
}

// DeploymentConfigList is a collection of deployment configs.
message DeploymentConfigList {
  // Standard object's metadata.
  optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1;

  // Items is a list of deployment configs
  repeated DeploymentConfig items = 2;
}

// DeploymentConfigRollback provides the input to rollback generation.
message DeploymentConfigRollback {
  // Name of the deployment config that will be rolled back.
  optional string name = 1;

  // UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
  map<string, string> updatedAnnotations = 2;

  // Spec defines the options to rollback generation.
  optional DeploymentConfigRollbackSpec spec = 3;
}

// DeploymentConfigRollbackSpec represents the options for rollback generation.
message DeploymentConfigRollbackSpec {
  // From points to a ReplicationController which is a deployment.
  optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1;

  // Revision to rollback to. If set to 0, rollback to the last revision.
  optional int64 revision = 2;

  // IncludeTriggers specifies whether to include config Triggers.
  optional bool includeTriggers = 3;

  // IncludeTemplate specifies whether to include the PodTemplateSpec.
  optional bool includeTemplate = 4;

  // IncludeReplicationMeta specifies whether to include the replica count and selector.
  optional bool includeReplicationMeta = 5;

  // IncludeStrategy specifies whether to include the deployment Strategy.
  optional bool includeStrategy = 6;
}

// DeploymentConfigSpec represents the desired state of the deployment.
message DeploymentConfigSpec {
  // Strategy describes how a deployment is executed.
  optional DeploymentStrategy strategy = 1;

  // MinReadySeconds is the minimum number of seconds for which a newly created pod should
  // be ready without any of its container crashing, for it to be considered available.
  // Defaults to 0 (pod will be considered available as soon as it is ready)
  optional int32 minReadySeconds = 9;

  // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
  // are defined, a new deployment can only occur as a result of an explicit client update to the
  // DeploymentConfig with a new LatestVersion.
  repeated DeploymentTriggerPolicy triggers = 2;

  // Replicas is the number of desired replicas.
  optional int32 replicas = 3;

  // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
  // This field is a pointer to allow for differentiation between an explicit zero and not specified.
  optional int32 revisionHistoryLimit = 4;

  // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
  // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
  // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
  optional bool test = 5;

  // Paused indicates that the deployment config is paused resulting in no new deployments on template
  // changes or changes in the template caused by other triggers.
  optional bool paused = 6;

  // Selector is a label query over pods that should match the Replicas count.
  map<string, string> selector = 7;

  // Template is the object that describes the pod that will be created if
  // insufficient replicas are detected.
  optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 8;
}

// DeploymentConfigStatus represents the current deployment state.
message DeploymentConfigStatus {
  // LatestVersion is used to determine whether the current deployment associated with a deployment
  // config is out of sync.
  optional int64 latestVersion = 1;

  // ObservedGeneration is the most recent generation observed by the deployment config controller.
  optional int64 observedGeneration = 2;

  // Replicas is the total number of pods targeted by this deployment config.
  optional int32 replicas = 3;

  // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
  // that have the desired template spec.
  optional int32 updatedReplicas = 4;

  // AvailableReplicas is the total number of available pods targeted by this deployment config.
  optional int32 availableReplicas = 5;

  // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
  optional int32 unavailableReplicas = 6;

  // Details are the reasons for the update to this deployment config.
  // This could be based on a change made by the user or caused by an automatic trigger
  optional DeploymentDetails details = 7;
}

// DeploymentDetails captures information about the causes of a deployment.
message DeploymentDetails {
  // Message is the user specified change message, if this deployment was triggered manually by the user
  optional string message = 1;

  // Causes are extended data associated with all the causes for creating a new deployment
  repeated DeploymentCause causes = 2;
}

// DeploymentLog represents the logs for a deployment
message DeploymentLog {
}

// DeploymentLogOptions is the REST options for a deployment log
message DeploymentLogOptions {
  // The container for which to stream logs. Defaults to only container if there is one container in the pod.
  optional string container = 1;

  // Follow if true indicates that the build log should be streamed until
  // the build terminates.
  optional bool follow = 2;

  // Return previous deployment logs. Defaults to false.
  optional bool previous = 3;

  // A relative time in seconds before the current time from which to show logs. If this value
  // precedes the time a pod was started, only logs since the pod start will be returned.
  // If this value is in the future, no logs will be returned.
  // Only one of sinceSeconds or sinceTime may be specified.
  optional int64 sinceSeconds = 4;

  // An RFC3339 timestamp from which to show logs. If this value
  // precedes the time a pod was started, only logs since the pod start will be returned.
  // If this value is in the future, no logs will be returned.
  // Only one of sinceSeconds or sinceTime may be specified.
  optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5;

  // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
  // of log output. Defaults to false.
  optional bool timestamps = 6;

  // If set, the number of lines from the end of the logs to show. If not specified,
  // logs are shown from the creation of the container or sinceSeconds or sinceTime
  optional int64 tailLines = 7;

  // If set, the number of bytes to read from the server before terminating the
  // log output. This may not display a complete final line of logging, and may return
  // slightly more or slightly less than the specified limit.
  optional int64 limitBytes = 8;

  // NoWait if true causes the call to return immediately even if the deployment
  // is not available yet. Otherwise the server will wait until the deployment has started.
  // TODO: Fix the tag to 'noWait' in v2
  optional bool nowait = 9;

  // Version of the deployment for which to view logs.
  optional int64 version = 10;
}

// DeploymentStrategy describes how to perform a deployment.
message DeploymentStrategy {
  // Type is the name of a deployment strategy.
  optional string type = 1;

  // CustomParams are the input to the Custom deployment strategy.
  optional CustomDeploymentStrategyParams customParams = 2;

  // RecreateParams are the input to the Recreate deployment strategy.
  optional RecreateDeploymentStrategyParams recreateParams = 3;

  // RollingParams are the input to the Rolling deployment strategy.
  optional RollingDeploymentStrategyParams rollingParams = 4;

  // Resources contains resource requirements to execute the deployment and any hooks
  optional k8s.io.kubernetes.pkg.api.v1.ResourceRequirements resources = 5;

  // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
  map<string, string> labels = 6;

  // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
  map<string, string> annotations = 7;
}

// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
message DeploymentTriggerImageChangeParams {
  // Automatic means that the detection of a new tag value should result in an image update
  // inside the pod template. Deployment configs that haven't been deployed yet will always
  // have their images updated. Deployment configs that have been deployed at least once, will
  // have their images updated only if this is set to true.
  optional bool automatic = 1;

  // ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
  repeated string containerNames = 2;

  // From is a reference to an image stream tag to watch for changes. From.Name is the only
  // required subfield - if From.Namespace is blank, the namespace of the current deployment
  // trigger will be used.
  optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 3;

  // LastTriggeredImage is the last image to be triggered.
  optional string lastTriggeredImage = 4;
}

// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
message DeploymentTriggerPolicy {
  // Type of the trigger
  optional string type = 1;

  // ImageChangeParams represents the parameters for the ImageChange trigger.
  optional DeploymentTriggerImageChangeParams imageChangeParams = 2;
}

// ExecNewPodHook is a hook implementation which runs a command in a new pod
// based on the specified container which is assumed to be part of the
// deployment template.
message ExecNewPodHook {
  // Command is the action command and its arguments.
  repeated string command = 1;

  // Env is a set of environment variables to supply to the hook pod's container.
  repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2;

  // ContainerName is the name of a container in the deployment pod template
  // whose Docker image will be used for the hook pod's container.
  optional string containerName = 3;

  // Volumes is a list of named volumes from the pod template which should be
  // copied to the hook pod. Volumes names not found in pod spec are ignored.
  // An empty list means no volumes will be copied.
  repeated string volumes = 4;
}

// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
message LifecycleHook {
  // FailurePolicy specifies what action to take if the hook fails.
  optional string failurePolicy = 1;

  // ExecNewPod specifies the options for a lifecycle hook backed by a pod.
  optional ExecNewPodHook execNewPod = 2;

  // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
  repeated TagImageHook tagImages = 3;
}

// RecreateDeploymentStrategyParams are the input to the Recreate deployment
// strategy.
message RecreateDeploymentStrategyParams {
  // TimeoutSeconds is the time to wait for updates before giving up. If the
  // value is nil, a default will be used.
  optional int64 timeoutSeconds = 1;

  // Pre is a lifecycle hook which is executed before the strategy manipulates
  // the deployment. All LifecycleHookFailurePolicy values are supported.
  optional LifecycleHook pre = 2;

  // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
  // pod is created. All LifecycleHookFailurePolicy values are supported.
  optional LifecycleHook mid = 3;

  // Post is a lifecycle hook which is executed after the strategy has
  // finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
  optional LifecycleHook post = 4;
}

// RollingDeploymentStrategyParams are the input to the Rolling deployment
// strategy.
message RollingDeploymentStrategyParams {
  // UpdatePeriodSeconds is the time to wait between individual pod updates.
  // If the value is nil, a default will be used.
  optional int64 updatePeriodSeconds = 1;

  // IntervalSeconds is the time to wait between polling deployment status
  // after update. If the value is nil, a default will be used.
  optional int64 intervalSeconds = 2;

  // TimeoutSeconds is the time to wait for updates before giving up. If the
  // value is nil, a default will be used.
  optional int64 timeoutSeconds = 3;

  // MaxUnavailable is the maximum number of pods that can be unavailable
  // during the update. Value can be an absolute number (ex: 5) or a
  // percentage of total pods at the start of update (ex: 10%). Absolute
  // number is calculated from percentage by rounding up.
  //
  // This cannot be 0 if MaxSurge is 0. By default, 25% is used.
  //
  // Example: when this is set to 30%, the old RC can be scaled down by 30%
  // immediately when the rolling update starts. Once new pods are ready, old
  // RC can be scaled down further, followed by scaling up the new RC,
  // ensuring that at least 70% of original number of pods are available at
  // all times during the update.
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 4;

  // MaxSurge is the maximum number of pods that can be scheduled above the
  // original number of pods. Value can be an absolute number (ex: 5) or a
  // percentage of total pods at the start of the update (ex: 10%). Absolute
  // number is calculated from percentage by rounding up.
  //
  // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
  //
  // Example: when this is set to 30%, the new RC can be scaled up by 30%
  // immediately when the rolling update starts. Once old pods have been
  // killed, new RC can be scaled up further, ensuring that total number of
  // pods running at any time during the update is atmost 130% of original
  // pods.
  optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 5;

  // UpdatePercent is the percentage of replicas to scale up or down each
  // interval. If nil, one replica will be scaled up and down each interval.
  // If negative, the scale order will be down/up instead of up/down.
  // DEPRECATED: Use MaxUnavailable/MaxSurge instead.
  optional int32 updatePercent = 6;

  // Pre is a lifecycle hook which is executed before the deployment process
  // begins. All LifecycleHookFailurePolicy values are supported.
  optional LifecycleHook pre = 7;

  // Post is a lifecycle hook which is executed after the strategy has
  // finished all deployment logic. The LifecycleHookFailurePolicyAbort policy
  // is NOT supported.
  optional LifecycleHook post = 8;
}

// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
message TagImageHook {
  // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
  // container this value will be defaulted to the name of that container.
  optional string containerName = 1;

  // To is the target ImageStreamTag to set the container's image onto.
  optional k8s.io.kubernetes.pkg.api.v1.ObjectReference to = 2;
}

View File

@ -0,0 +1,34 @@
package v1
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
// GroupName is the API group name used when registering these objects;
// it is the empty group.
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
// AddToScheme registers this package's kinds together with their defaulting
// and conversion functions on the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	addKnownTypes(scheme)
	addDefaultingFuncs(scheme)
	addConversionFuncs(scheme)
}
// addKnownTypes adds the list of known types to api.Scheme so they can be
// encoded and decoded under SchemeGroupVersion.
func addKnownTypes(scheme *runtime.Scheme) {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&DeploymentConfig{},
		&DeploymentConfigList{},
		&DeploymentConfigRollback{},
		&DeploymentLog{},
		&DeploymentLogOptions{},
	)
}
// GetObjectKind exposes each type's embedded TypeMeta so the runtime codec
// machinery can get and set apiVersion/kind on these objects.
func (obj *DeploymentConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentConfigList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentConfigRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentLog) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *DeploymentLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -0,0 +1,248 @@
package v1
// This file contains methods that can be used by the go-restful package to generate Swagger
// documentation for the object types found in 'types.go' This file is automatically generated
// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift.
// ==== DO NOT EDIT THIS FILE MANUALLY ====
var map_CustomDeploymentStrategyParams = map[string]string{
"": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
"image": "Image specifies a Docker image which can carry out a deployment.",
"environment": "Environment holds the environment which will be given to the container for Image.",
"command": "Command is optional and overrides CMD in the container Image.",
}
func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
return map_CustomDeploymentStrategyParams
}
var map_DeploymentCause = map[string]string{
"": "DeploymentCause captures information about a particular cause of a deployment.",
"type": "Type of the trigger that resulted in the creation of a new deployment",
"imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change",
}
func (DeploymentCause) SwaggerDoc() map[string]string {
return map_DeploymentCause
}
var map_DeploymentCauseImageTrigger = map[string]string{
"": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger",
"from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
}
func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
return map_DeploymentCauseImageTrigger
}
var map_DeploymentConfig = map[string]string{
"": "DeploymentConfig represents a configuration for a single deployment (represented as a ReplicationController). It also contains details about changes which resulted in the current state of the DeploymentConfig. Each change to the DeploymentConfig which should result in a new deployment results in an increment of LatestVersion.",
"metadata": "Standard object's metadata.",
"spec": "Spec represents a desired deployment state and how to deploy to it.",
"status": "Status represents the current deployment state.",
}
func (DeploymentConfig) SwaggerDoc() map[string]string {
return map_DeploymentConfig
}
var map_DeploymentConfigList = map[string]string{
"": "DeploymentConfigList is a collection of deployment configs.",
"metadata": "Standard object's metadata.",
"items": "Items is a list of deployment configs",
}
func (DeploymentConfigList) SwaggerDoc() map[string]string {
return map_DeploymentConfigList
}
var map_DeploymentConfigRollback = map[string]string{
"": "DeploymentConfigRollback provides the input to rollback generation.",
"name": "Name of the deployment config that will be rolled back.",
"updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.",
"spec": "Spec defines the options to rollback generation.",
}
func (DeploymentConfigRollback) SwaggerDoc() map[string]string {
return map_DeploymentConfigRollback
}
var map_DeploymentConfigRollbackSpec = map[string]string{
"": "DeploymentConfigRollbackSpec represents the options for rollback generation.",
"from": "From points to a ReplicationController which is a deployment.",
"revision": "Revision to rollback to. If set to 0, rollback to the last revision.",
"includeTriggers": "IncludeTriggers specifies whether to include config Triggers.",
"includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.",
"includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.",
"includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.",
}
func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string {
return map_DeploymentConfigRollbackSpec
}
var map_DeploymentConfigSpec = map[string]string{
"": "DeploymentConfigSpec represents the desired state of the deployment.",
"strategy": "Strategy describes how a deployment is executed.",
"minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
"triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion.",
"replicas": "Replicas is the number of desired replicas.",
"revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified.",
"test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.",
"paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.",
"selector": "Selector is a label query over pods that should match the Replicas count.",
"template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.",
}
func (DeploymentConfigSpec) SwaggerDoc() map[string]string {
return map_DeploymentConfigSpec
}
var map_DeploymentConfigStatus = map[string]string{
"": "DeploymentConfigStatus represents the current deployment state.",
"latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.",
"observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.",
"replicas": "Replicas is the total number of pods targeted by this deployment config.",
"updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.",
"availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.",
"unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.",
"details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger",
}
func (DeploymentConfigStatus) SwaggerDoc() map[string]string {
return map_DeploymentConfigStatus
}
var map_DeploymentDetails = map[string]string{
"": "DeploymentDetails captures information about the causes of a deployment.",
"message": "Message is the user specified change message, if this deployment was triggered manually by the user",
"causes": "Causes are extended data associated with all the causes for creating a new deployment",
}
func (DeploymentDetails) SwaggerDoc() map[string]string {
return map_DeploymentDetails
}
var map_DeploymentLog = map[string]string{
"": "DeploymentLog represents the logs for a deployment",
}
func (DeploymentLog) SwaggerDoc() map[string]string {
return map_DeploymentLog
}
// map_DeploymentLogOptions maps DeploymentLogOptions JSON field names to their
// swagger descriptions; the "" key documents the type itself.
var map_DeploymentLogOptions = map[string]string{
"": "DeploymentLogOptions is the REST options for a deployment log",
"container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
"follow": "Follow if true indicates that the build log should be streamed until the build terminates.",
"previous": "Return previous deployment logs. Defaults to false.",
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
"version": "Version of the deployment for which to view logs.",
}

// SwaggerDoc returns the generated swagger documentation map for
// DeploymentLogOptions.
func (DeploymentLogOptions) SwaggerDoc() map[string]string {
return map_DeploymentLogOptions
}
// map_DeploymentStrategy maps DeploymentStrategy JSON field names to their
// swagger descriptions; the "" key documents the type itself.
var map_DeploymentStrategy = map[string]string{
"": "DeploymentStrategy describes how to perform a deployment.",
"type": "Type is the name of a deployment strategy.",
"customParams": "CustomParams are the input to the Custom deployment strategy.",
"recreateParams": "RecreateParams are the input to the Recreate deployment strategy.",
"rollingParams": "RollingParams are the input to the Rolling deployment strategy.",
"resources": "Resources contains resource requirements to execute the deployment and any hooks",
"labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
"annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
}

// SwaggerDoc returns the generated swagger documentation map for
// DeploymentStrategy.
func (DeploymentStrategy) SwaggerDoc() map[string]string {
return map_DeploymentStrategy
}
// map_DeploymentTriggerImageChangeParams maps the ImageChange trigger
// parameter JSON field names to their swagger descriptions.
var map_DeploymentTriggerImageChangeParams = map[string]string{
"": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.",
"automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template. Deployment configs that haven't been deployed yet will always have their images updated. Deployment configs that have been deployed at least once, will have their images updated only if this is set to true.",
"containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod.",
"from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
"lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.",
}

// SwaggerDoc returns the generated swagger documentation map for
// DeploymentTriggerImageChangeParams.
func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
return map_DeploymentTriggerImageChangeParams
}

// map_DeploymentTriggerPolicy maps DeploymentTriggerPolicy JSON field names to
// their swagger descriptions.
var map_DeploymentTriggerPolicy = map[string]string{
"": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.",
"type": "Type of the trigger",
"imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.",
}

// SwaggerDoc returns the generated swagger documentation map for
// DeploymentTriggerPolicy.
func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
return map_DeploymentTriggerPolicy
}
// map_ExecNewPodHook maps ExecNewPodHook JSON field names to their swagger
// descriptions.
var map_ExecNewPodHook = map[string]string{
"": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
"command": "Command is the action command and its arguments.",
"env": "Env is a set of environment variables to supply to the hook pod's container.",
"containerName": "ContainerName is the name of a container in the deployment pod template whose Docker image will be used for the hook pod's container.",
"volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.",
}

// SwaggerDoc returns the generated swagger documentation map for
// ExecNewPodHook.
func (ExecNewPodHook) SwaggerDoc() map[string]string {
return map_ExecNewPodHook
}

// map_LifecycleHook maps LifecycleHook JSON field names to their swagger
// descriptions.
var map_LifecycleHook = map[string]string{
"": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.",
"failurePolicy": "FailurePolicy specifies what action to take if the hook fails.",
"execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.",
"tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.",
}

// SwaggerDoc returns the generated swagger documentation map for LifecycleHook.
func (LifecycleHook) SwaggerDoc() map[string]string {
return map_LifecycleHook
}
// map_RecreateDeploymentStrategyParams maps the Recreate strategy parameter
// JSON field names to their swagger descriptions.
var map_RecreateDeploymentStrategyParams = map[string]string{
"": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.",
"timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
"pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.",
"mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.",
"post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.",
}

// SwaggerDoc returns the generated swagger documentation map for
// RecreateDeploymentStrategyParams.
func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string {
return map_RecreateDeploymentStrategyParams
}

// map_RollingDeploymentStrategyParams maps the Rolling strategy parameter JSON
// field names to their swagger descriptions.
var map_RollingDeploymentStrategyParams = map[string]string{
"": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.",
"updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.",
"intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.",
"timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.",
"maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.",
// NOTE(review): "atmost" below is a typo inherited from the upstream type
// comment; it must be fixed upstream and regenerated, not edited here.
"maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.",
"updatePercent": "UpdatePercent is the percentage of replicas to scale up or down each interval. If nil, one replica will be scaled up and down each interval. If negative, the scale order will be down/up instead of up/down. DEPRECATED: Use MaxUnavailable/MaxSurge instead.",
"pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.",
"post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. The LifecycleHookFailurePolicyAbort policy is NOT supported.",
}

// SwaggerDoc returns the generated swagger documentation map for
// RollingDeploymentStrategyParams.
func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string {
return map_RollingDeploymentStrategyParams
}
// map_TagImageHook maps TagImageHook JSON field names to their swagger
// descriptions.
var map_TagImageHook = map[string]string{
"": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.",
"containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.",
"to": "To is the target ImageStreamTag to set the container's image onto.",
}

// SwaggerDoc returns the generated swagger documentation map for TagImageHook.
func (TagImageHook) SwaggerDoc() map[string]string {
return map_TagImageHook
}

View File

@ -0,0 +1,457 @@
package v1
import (
"k8s.io/kubernetes/pkg/api/unversioned"
kapi "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
)
// DeploymentPhase describes the possible states a deployment can be in.
type DeploymentPhase string

// Valid DeploymentPhase values.
const (
// DeploymentPhaseNew means the deployment has been accepted but not yet acted upon.
DeploymentPhaseNew DeploymentPhase = "New"
// DeploymentPhasePending means the deployment been handed over to a deployment strategy,
// but the strategy has not yet declared the deployment to be running.
DeploymentPhasePending DeploymentPhase = "Pending"
// DeploymentPhaseRunning means the deployment strategy has reported the deployment as
// being in-progress.
DeploymentPhaseRunning DeploymentPhase = "Running"
// DeploymentPhaseComplete means the deployment finished without an error.
DeploymentPhaseComplete DeploymentPhase = "Complete"
// DeploymentPhaseFailed means the deployment finished with an error.
DeploymentPhaseFailed DeploymentPhase = "Failed"
)
// DeploymentStrategy describes how to perform a deployment.
// NOTE(review): presumably only the params field matching Type is consulted
// (CustomParams for Custom, etc.) — confirm against the strategy controller.
type DeploymentStrategy struct {
// Type is the name of a deployment strategy.
Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"`
// CustomParams are the input to the Custom deployment strategy.
CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"`
// RecreateParams are the input to the Recreate deployment strategy.
RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"`
// RollingParams are the input to the Rolling deployment strategy.
RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"`
// Resources contains resource requirements to execute the deployment and any hooks
Resources kapi.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"`
// Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"`
// Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.
Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"`
}

// DeploymentStrategyType refers to a specific DeploymentStrategy implementation.
type DeploymentStrategyType string

// Valid DeploymentStrategyType values.
const (
// DeploymentStrategyTypeRecreate is a simple strategy suitable as a default.
DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate"
// DeploymentStrategyTypeCustom is a user defined strategy.
DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom"
// DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater.
DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling"
)
// CustomDeploymentStrategyParams are the input to the Custom deployment strategy.
// All fields are optional (omitempty).
type CustomDeploymentStrategyParams struct {
// Image specifies a Docker image which can carry out a deployment.
Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
// Environment holds the environment which will be given to the container for Image.
Environment []kapi.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"`
// Command is optional and overrides CMD in the container Image.
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
}
// RecreateDeploymentStrategyParams are the input to the Recreate deployment
// strategy. All hook fields are optional pointers; nil means no hook.
type RecreateDeploymentStrategyParams struct {
// TimeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
// Pre is a lifecycle hook which is executed before the strategy manipulates
// the deployment. All LifecycleHookFailurePolicy values are supported.
Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"`
// Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new
// pod is created. All LifecycleHookFailurePolicy values are supported.
Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"`
// Post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. All LifecycleHookFailurePolicy values are supported.
Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"`
}
// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.
type LifecycleHook struct {
// FailurePolicy specifies what action to take if the hook fails.
FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"`
// ExecNewPod specifies the options for a lifecycle hook backed by a pod.
ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"`
// TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.
TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"`
}

// LifecycleHookFailurePolicy describes possible actions to take if a hook fails.
type LifecycleHookFailurePolicy string

// Valid LifecycleHookFailurePolicy values.
const (
// LifecycleHookFailurePolicyRetry means retry the hook until it succeeds.
LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry"
// LifecycleHookFailurePolicyAbort means abort the deployment (if possible).
LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort"
// LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore"
)
// ExecNewPodHook is a hook implementation which runs a command in a new pod
// based on the specified container which is assumed to be part of the
// deployment template.
// Command and ContainerName are required (no omitempty on their json tags).
type ExecNewPodHook struct {
// Command is the action command and its arguments.
Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
// Env is a set of environment variables to supply to the hook pod's container.
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
// ContainerName is the name of a container in the deployment pod template
// whose Docker image will be used for the hook pod's container.
ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
// Volumes is a list of named volumes from the pod template which should be
// copied to the hook pod. Volumes names not found in pod spec are ignored.
// An empty list means no volumes will be copied.
Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
}
// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
type TagImageHook struct {
// ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
// container this value will be defaulted to the name of that container.
ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
// To is the target ImageStreamTag to set the container's image onto.
To kapi.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
}
// RollingDeploymentStrategyParams are the input to the Rolling deployment
// strategy.
type RollingDeploymentStrategyParams struct {
// UpdatePeriodSeconds is the time to wait between individual pod updates.
// If the value is nil, a default will be used.
UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"`
// IntervalSeconds is the time to wait between polling deployment status
// after update. If the value is nil, a default will be used.
IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"`
// TimeoutSeconds is the time to wait for updates before giving up. If the
// value is nil, a default will be used.
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
// MaxUnavailable is the maximum number of pods that can be unavailable
// during the update. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
//
// This cannot be 0 if MaxSurge is 0. By default, 25% is used.
//
// Example: when this is set to 30%, the old RC can be scaled down by 30%
// immediately when the rolling update starts. Once new pods are ready, old
// RC can be scaled down further, followed by scaling up the new RC,
// ensuring that at least 70% of original number of pods are available at
// all times during the update.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"`
// MaxSurge is the maximum number of pods that can be scheduled above the
// original number of pods. Value can be an absolute number (ex: 5) or a
// percentage of total pods at the start of the update (ex: 10%). Absolute
// number is calculated from percentage by rounding up.
//
// This cannot be 0 if MaxUnavailable is 0. By default, 25% is used.
//
// Example: when this is set to 30%, the new RC can be scaled up by 30%
// immediately when the rolling update starts. Once old pods have been
// killed, new RC can be scaled up further, ensuring that total number of
// pods running at any time during the update is at most 130% of original
// pods.
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"`
// UpdatePercent is the percentage of replicas to scale up or down each
// interval. If nil, one replica will be scaled up and down each interval.
// If negative, the scale order will be down/up instead of up/down.
//
// Deprecated: use MaxUnavailable/MaxSurge instead.
UpdatePercent *int32 `json:"updatePercent,omitempty" protobuf:"varint,6,opt,name=updatePercent"`
// Pre is a lifecycle hook which is executed before the deployment process
// begins. All LifecycleHookFailurePolicy values are supported.
Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"`
// Post is a lifecycle hook which is executed after the strategy has
// finished all deployment logic. The LifecycleHookFailurePolicyAbort policy
// is NOT supported.
Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"`
}
// These constants represent keys used for correlating objects related to deployments.
const (
// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
// DeploymentConfig on which the deployment is based.
DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
// DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
// of the deployment (a ReplicationController) on which the deployer Pod acts.
DeploymentAnnotation = "openshift.io/deployment.name"
// DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the name of the deployer Pod which will act upon the ReplicationController
// to implement the deployment behavior.
DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
// DeploymentPodTypeLabel is a label which contains the type of the deployment pod.
DeploymentPodTypeLabel = "openshift.io/deployer-pod.type"
// DeployerPodForDeploymentLabel is a label which groups pods related to a
// deployment. The value is a deployment name. The deployer pod and hook pods
// created by the internal strategies will have this label. Custom
// strategies can apply this label to any pods they create, enabling
// platform-provided cancellation and garbage collection support.
DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
// DeploymentPhaseAnnotation is an annotation name used to retrieve the DeploymentPhase of
// a deployment.
DeploymentPhaseAnnotation = "openshift.io/deployment.phase"
// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
// DeploymentConfig on which a given deployment is based.
DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
// the deployment.
DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
// DeploymentLabel is the name of a label used to correlate a deployment with the Pod created
// to execute the deployment logic.
// TODO: This is a workaround for upstream's lack of annotation support on PodTemplate. Once
// annotations are available on PodTemplate, audit this constant with the goal of removing it.
DeploymentLabel = "deployment"
// DeploymentConfigLabel is the name of a label used to correlate a deployment with the
// DeploymentConfigs on which the deployment is based.
DeploymentConfigLabel = "deploymentconfig"
// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state
// Used for specifying the reason for cancellation or failure of a deployment
DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
// DeploymentCancelledAnnotation indicates that the deployment has been cancelled
// The annotation value does not matter and its mere presence indicates cancellation
DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
// DeploymentInstantiatedAnnotation indicates that the deployment has been instantiated.
// The annotation value does not matter and its mere presence indicates instantiation.
DeploymentInstantiatedAnnotation = "openshift.io/deployment.instantiated"
)
// +genclient=true marks this type for client generation by the Kubernetes
// code generators.
// +genclient=true

// DeploymentConfig represents a configuration for a single deployment (represented as a
// ReplicationController). It also contains details about changes which resulted in the current
// state of the DeploymentConfig. Each change to the DeploymentConfig which should result in
// a new deployment results in an increment of LatestVersion.
type DeploymentConfig struct {
unversioned.TypeMeta `json:",inline"`
// Standard object's metadata.
kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec represents a desired deployment state and how to deploy to it.
Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current deployment state.
Status DeploymentConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
}
// DeploymentConfigSpec represents the desired state of the deployment.
// NOTE(review): protobuf field numbers are not sequential (MinReadySeconds is
// tag 9 while later fields use 2-8); tags only need to be unique, so this is
// valid — do not renumber.
type DeploymentConfigSpec struct {
// Strategy describes how a deployment is executed.
Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"`
// MinReadySeconds is the minimum number of seconds for which a newly created pod should
// be ready without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
// Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers
// are defined, a new deployment can only occur as a result of an explicit client update to the
// DeploymentConfig with a new LatestVersion.
Triggers []DeploymentTriggerPolicy `json:"triggers" protobuf:"bytes,2,rep,name=triggers"`
// Replicas is the number of desired replicas.
Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"`
// RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks.
// This field is a pointer to allow for differentiation between an explicit zero and not specified.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"`
// Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the
// deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding
// or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.
Test bool `json:"test" protobuf:"varint,5,opt,name=test"`
// Paused indicates that the deployment config is paused resulting in no new deployments on template
// changes or changes in the template caused by other triggers.
Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"`
// Selector is a label query over pods that should match the Replicas count.
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"`
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
Template *kapi.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"`
}
// DeploymentConfigStatus represents the current deployment state.
// All fields are populated by the system (the deployment config controller).
type DeploymentConfigStatus struct {
// LatestVersion is used to determine whether the current deployment associated with a deployment
// config is out of sync.
LatestVersion int64 `json:"latestVersion,omitempty" protobuf:"varint,1,opt,name=latestVersion"`
// ObservedGeneration is the most recent generation observed by the deployment config controller.
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"`
// Replicas is the total number of pods targeted by this deployment config.
Replicas int32 `json:"replicas,omitempty" protobuf:"varint,3,opt,name=replicas"`
// UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config
// that have the desired template spec.
UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,4,opt,name=updatedReplicas"`
// AvailableReplicas is the total number of available pods targeted by this deployment config.
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
// UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.
UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,6,opt,name=unavailableReplicas"`
// Details are the reasons for the update to this deployment config.
// This could be based on a change made by the user or caused by an automatic trigger
Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"`
}
// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
type DeploymentTriggerPolicy struct {
// Type of the trigger
Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
// ImageChangeParams represents the parameters for the ImageChange trigger.
ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
}

// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation.
type DeploymentTriggerType string

// Valid DeploymentTriggerType values.
const (
// DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
// a Docker image repository.
DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
// DeploymentTriggerOnConfigChange will create new deployments in response to changes to
// the ControllerTemplate of a DeploymentConfig.
DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange"
)
// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.
type DeploymentTriggerImageChangeParams struct {
// Automatic means that the detection of a new tag value should result in an image update
// inside the pod template. Deployment configs that haven't been deployed yet will always
// have their images updated. Deployment configs that have been deployed at least once, will
// have their images updated only if this is set to true.
Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"`
// ContainerNames is used to restrict tag updates to the specified set of container names in a pod.
ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"`
// From is a reference to an image stream tag to watch for changes. From.Name is the only
// required subfield - if From.Namespace is blank, the namespace of the current deployment
// trigger will be used.
From kapi.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"`
// LastTriggeredImage is the last image to be triggered.
LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"`
}
// DeploymentDetails captures information about the causes of a deployment.
type DeploymentDetails struct {
// Message is the user specified change message, if this deployment was triggered manually by the user
Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
// Causes are extended data associated with all the causes for creating a new deployment
Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
}

// DeploymentCause captures information about a particular cause of a deployment.
type DeploymentCause struct {
// Type of the trigger that resulted in the creation of a new deployment
Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
// ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
}
// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
// from an image change trigger
type DeploymentCauseImageTrigger struct {
// From is a reference to the changed object which triggered a deployment. The field may have
// the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
}
// DeploymentConfigList is a collection of deployment configs.
type DeploymentConfigList struct {
unversioned.TypeMeta `json:",inline"`
// Standard object's metadata.
unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of deployment configs
Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DeploymentConfigRollback provides the input to rollback generation.
type DeploymentConfigRollback struct {
unversioned.TypeMeta `json:",inline"`
// Name of the deployment config that will be rolled back.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// UpdatedAnnotations is a set of new annotations that will be added in the deployment config.
UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"`
// Spec defines the options to rollback generation.
Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"`
}
// DeploymentConfigRollbackSpec represents the options for rollback generation.
type DeploymentConfigRollbackSpec struct {
// From points to a ReplicationController which is a deployment.
From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
// Revision to rollback to. If set to 0, rollback to the last revision.
Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"`
// IncludeTriggers specifies whether to include config Triggers.
IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"`
// IncludeTemplate specifies whether to include the PodTemplateSpec.
IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"`
// IncludeReplicationMeta specifies whether to include the replica count and selector.
IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"`
// IncludeStrategy specifies whether to include the deployment Strategy.
IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"`
}
// DeploymentLog represents the logs for a deployment
type DeploymentLog struct {
unversioned.TypeMeta `json:",inline"`
}
// DeploymentLogOptions is the REST options for a deployment log
type DeploymentLogOptions struct {
unversioned.TypeMeta `json:",inline"`
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
// Follow if true indicates that the build log should be streamed until
// the build terminates.
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// Return previous deployment logs. Defaults to false.
Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
// A relative time in seconds before the current time from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
// An RFC3339 timestamp from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// of log output. Defaults to false.
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// If set, the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// If set, the number of bytes to read from the server before terminating the
// log output. This may not display a complete final line of logging, and may return
// slightly more or slightly less than the specified limit.
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
// NoWait if true causes the call to return immediately even if the deployment
// is not available yet. Otherwise the server will wait until the deployment has started.
// TODO: Fix the tag to 'noWait' in v2
NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
// Version of the deployment for which to view logs.
Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
}

View File

@ -0,0 +1,257 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package api
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
)
// init registers the generated deep copy functions with the global scheme at
// package load time. This file is generated by deepcopy-gen; regenerate rather
// than editing the copy logic by hand.
func init() {
	if err := api.Scheme.AddGeneratedDeepCopyFuncs(
		DeepCopy_api_ClusterRoleScopeRestriction,
		DeepCopy_api_OAuthAccessToken,
		DeepCopy_api_OAuthAccessTokenList,
		DeepCopy_api_OAuthAuthorizeToken,
		DeepCopy_api_OAuthAuthorizeTokenList,
		DeepCopy_api_OAuthClient,
		DeepCopy_api_OAuthClientAuthorization,
		DeepCopy_api_OAuthClientAuthorizationList,
		DeepCopy_api_OAuthClientList,
		DeepCopy_api_ScopeRestriction,
	); err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}

// DeepCopy_api_ClusterRoleScopeRestriction deep-copies in into out, cloning the
// RoleNames and Namespaces slices (nil stays nil).
func DeepCopy_api_ClusterRoleScopeRestriction(in ClusterRoleScopeRestriction, out *ClusterRoleScopeRestriction, c *conversion.Cloner) error {
	if in.RoleNames != nil {
		in, out := in.RoleNames, &out.RoleNames
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.RoleNames = nil
	}
	if in.Namespaces != nil {
		in, out := in.Namespaces, &out.Namespaces
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Namespaces = nil
	}
	out.AllowEscalation = in.AllowEscalation
	return nil
}

// DeepCopy_api_OAuthAccessToken deep-copies in into out, including object
// metadata and the Scopes slice.
func DeepCopy_api_OAuthAccessToken(in OAuthAccessToken, out *OAuthAccessToken, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.ClientName = in.ClientName
	out.ExpiresIn = in.ExpiresIn
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	out.RedirectURI = in.RedirectURI
	out.UserName = in.UserName
	out.UserUID = in.UserUID
	out.AuthorizeToken = in.AuthorizeToken
	out.RefreshToken = in.RefreshToken
	return nil
}

// DeepCopy_api_OAuthAccessTokenList deep-copies in into out, element-wise
// copying Items.
func DeepCopy_api_OAuthAccessTokenList(in OAuthAccessTokenList, out *OAuthAccessTokenList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]OAuthAccessToken, len(in))
		for i := range in {
			if err := DeepCopy_api_OAuthAccessToken(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_OAuthAuthorizeToken deep-copies in into out, including object
// metadata and the Scopes slice.
func DeepCopy_api_OAuthAuthorizeToken(in OAuthAuthorizeToken, out *OAuthAuthorizeToken, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.ClientName = in.ClientName
	out.ExpiresIn = in.ExpiresIn
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	out.RedirectURI = in.RedirectURI
	out.State = in.State
	out.UserName = in.UserName
	out.UserUID = in.UserUID
	return nil
}

// DeepCopy_api_OAuthAuthorizeTokenList deep-copies in into out, element-wise
// copying Items.
func DeepCopy_api_OAuthAuthorizeTokenList(in OAuthAuthorizeTokenList, out *OAuthAuthorizeTokenList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]OAuthAuthorizeToken, len(in))
		for i := range in {
			if err := DeepCopy_api_OAuthAuthorizeToken(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_OAuthClient deep-copies in into out, cloning the secret and
// redirect URI slices and element-wise copying ScopeRestrictions.
func DeepCopy_api_OAuthClient(in OAuthClient, out *OAuthClient, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.Secret = in.Secret
	if in.AdditionalSecrets != nil {
		in, out := in.AdditionalSecrets, &out.AdditionalSecrets
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.AdditionalSecrets = nil
	}
	out.RespondWithChallenges = in.RespondWithChallenges
	if in.RedirectURIs != nil {
		in, out := in.RedirectURIs, &out.RedirectURIs
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.RedirectURIs = nil
	}
	out.GrantMethod = in.GrantMethod
	if in.ScopeRestrictions != nil {
		in, out := in.ScopeRestrictions, &out.ScopeRestrictions
		*out = make([]ScopeRestriction, len(in))
		for i := range in {
			if err := DeepCopy_api_ScopeRestriction(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.ScopeRestrictions = nil
	}
	return nil
}

// DeepCopy_api_OAuthClientAuthorization deep-copies in into out, including
// object metadata and the Scopes slice.
func DeepCopy_api_OAuthClientAuthorization(in OAuthClientAuthorization, out *OAuthClientAuthorization, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.ClientName = in.ClientName
	out.UserName = in.UserName
	out.UserUID = in.UserUID
	if in.Scopes != nil {
		in, out := in.Scopes, &out.Scopes
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.Scopes = nil
	}
	return nil
}

// DeepCopy_api_OAuthClientAuthorizationList deep-copies in into out,
// element-wise copying Items.
func DeepCopy_api_OAuthClientAuthorizationList(in OAuthClientAuthorizationList, out *OAuthClientAuthorizationList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]OAuthClientAuthorization, len(in))
		for i := range in {
			if err := DeepCopy_api_OAuthClientAuthorization(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_OAuthClientList deep-copies in into out, element-wise copying
// Items.
func DeepCopy_api_OAuthClientList(in OAuthClientList, out *OAuthClientList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]OAuthClient, len(in))
		for i := range in {
			if err := DeepCopy_api_OAuthClient(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_ScopeRestriction deep-copies in into out, cloning ExactValues
// and allocating a fresh ClusterRole when present.
func DeepCopy_api_ScopeRestriction(in ScopeRestriction, out *ScopeRestriction, c *conversion.Cloner) error {
	if in.ExactValues != nil {
		in, out := in.ExactValues, &out.ExactValues
		*out = make([]string, len(in))
		copy(*out, in)
	} else {
		out.ExactValues = nil
	}
	if in.ClusterRole != nil {
		in, out := in.ClusterRole, &out.ClusterRole
		*out = new(ClusterRoleScopeRestriction)
		if err := DeepCopy_api_ClusterRoleScopeRestriction(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.ClusterRole = nil
	}
	return nil
}

View File

@ -0,0 +1,41 @@
package api
import "k8s.io/kubernetes/pkg/fields"
// OAuthAccessTokenToSelectableFields returns a label set that represents the object
func OAuthAccessTokenToSelectableFields(obj *OAuthAccessToken) fields.Set {
	set := fields.Set{}
	set["metadata.name"] = obj.Name
	set["clientName"] = obj.ClientName
	set["userName"] = obj.UserName
	set["userUID"] = obj.UserUID
	set["authorizeToken"] = obj.AuthorizeToken
	return set
}

// OAuthAuthorizeTokenToSelectableFields returns a label set that represents the object
func OAuthAuthorizeTokenToSelectableFields(obj *OAuthAuthorizeToken) fields.Set {
	set := fields.Set{}
	set["metadata.name"] = obj.Name
	set["clientName"] = obj.ClientName
	set["userName"] = obj.UserName
	set["userUID"] = obj.UserUID
	return set
}

// OAuthClientToSelectableFields returns a label set that represents the object
func OAuthClientToSelectableFields(obj *OAuthClient) fields.Set {
	set := fields.Set{}
	set["metadata.name"] = obj.Name
	return set
}

// OAuthClientAuthorizationToSelectableFields returns a label set that represents the object
func OAuthClientAuthorizationToSelectableFields(obj *OAuthClientAuthorization) fields.Set {
	set := fields.Set{}
	set["metadata.name"] = obj.Name
	set["clientName"] = obj.ClientName
	set["userName"] = obj.UserName
	set["userUID"] = obj.UserUID
	return set
}

View File

@ -0,0 +1,49 @@
package api
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
// GroupName is the API group for these objects; the internal version lives in
// the empty (legacy) group.
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) unversioned.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource
func Resource(resource string) unversioned.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}

// AddToScheme registers the OAuth API types with the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	// Add the API to Scheme.
	addKnownTypes(scheme)
}

// addKnownTypes adds the list of known types to the scheme.
func addKnownTypes(scheme *runtime.Scheme) {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&OAuthAccessToken{},
		&OAuthAccessTokenList{},
		&OAuthAuthorizeToken{},
		&OAuthAuthorizeTokenList{},
		&OAuthClient{},
		&OAuthClientList{},
		&OAuthClientAuthorization{},
		&OAuthClientAuthorizationList{},
	)
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthClientAuthorizationList) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthClientAuthorization) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthClientList) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthClient) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthAuthorizeTokenList) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthAuthorizeToken) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthAccessTokenList) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *OAuthAccessToken) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

View File

@ -0,0 +1,163 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
)
// OAuthAccessToken describes an OAuth access token.
type OAuthAccessToken struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// ClientName references the client that created this token.
	ClientName string
	// ExpiresIn is the seconds from CreationTime before this token expires.
	ExpiresIn int64
	// Scopes is an array of the requested scopes.
	Scopes []string
	// RedirectURI is the redirection associated with the token.
	RedirectURI string
	// UserName is the user name associated with this token
	UserName string
	// UserUID is the unique UID associated with this token
	UserUID string
	// AuthorizeToken contains the token that authorized this token
	AuthorizeToken string
	// RefreshToken is the value by which this token can be renewed. Can be blank.
	RefreshToken string
}

// OAuthAuthorizeToken describes an OAuth authorization token.
type OAuthAuthorizeToken struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// ClientName references the client that created this token.
	ClientName string
	// ExpiresIn is the seconds from CreationTime before this token expires.
	ExpiresIn int64
	// Scopes is an array of the requested scopes.
	Scopes []string
	// RedirectURI is the redirection associated with the token.
	RedirectURI string
	// State data from request
	State string
	// UserName is the user name associated with this token
	UserName string
	// UserUID is the unique UID associated with this token. UserUID and UserName must both match
	// for this token to be valid.
	UserUID string
}

// OAuthClient describes an OAuth client registration.
// +genclient=true
type OAuthClient struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// Secret is the unique secret associated with a client
	Secret string
	// AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation
	// and for service account token validation
	AdditionalSecrets []string
	// RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects
	RespondWithChallenges bool
	// RedirectURIs is the valid redirection URIs associated with a client
	RedirectURIs []string
	// GrantMethod determines how to handle grants for this client. If no method is provided, the
	// cluster default grant handling method will be used
	GrantMethod GrantHandlerType
	// ScopeRestrictions describes which scopes this client can request. Each requested scope
	// is checked against each restriction. If any restriction matches, then the scope is allowed.
	// If no restriction matches, then the scope is denied.
	ScopeRestrictions []ScopeRestriction
}

// GrantHandlerType selects how authorization grant requests for a client are handled.
type GrantHandlerType string

const (
	// GrantHandlerAuto auto-approves client authorization grant requests
	GrantHandlerAuto GrantHandlerType = "auto"
	// GrantHandlerPrompt prompts the user to approve new client authorization grant requests
	GrantHandlerPrompt GrantHandlerType = "prompt"
	// GrantHandlerDeny auto-denies client authorization grant requests
	GrantHandlerDeny GrantHandlerType = "deny"
)

// ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.
type ScopeRestriction struct {
	// ExactValues means the scope has to match a particular set of strings exactly
	ExactValues []string
	// ClusterRole describes a set of restrictions for cluster role scoping.
	ClusterRole *ClusterRoleScopeRestriction
}

// ClusterRoleScopeRestriction describes restrictions on cluster role scopes
type ClusterRoleScopeRestriction struct {
	// RoleNames is the list of cluster roles that can be referenced. * means anything
	RoleNames []string
	// Namespaces is the list of namespaces that can be referenced. * means any of them (including *)
	Namespaces []string
	// AllowEscalation indicates whether you can request roles and their escalating resources
	AllowEscalation bool
}

// OAuthClientAuthorization records the scopes a user granted to a client.
type OAuthClientAuthorization struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// ClientName references the client that created this authorization
	ClientName string
	// UserName is the user name that authorized this client
	UserName string
	// UserUID is the unique UID associated with this authorization. UserUID and UserName
	// must both match for this authorization to be valid.
	UserUID string
	// Scopes is an array of the granted scopes.
	Scopes []string
}

// OAuthAccessTokenList is a collection of OAuth access tokens.
type OAuthAccessTokenList struct {
	unversioned.TypeMeta
	unversioned.ListMeta
	// Items is the list of OAuth access tokens.
	Items []OAuthAccessToken
}

// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens.
type OAuthAuthorizeTokenList struct {
	unversioned.TypeMeta
	unversioned.ListMeta
	// Items is the list of OAuth authorization tokens.
	Items []OAuthAuthorizeToken
}

// OAuthClientList is a collection of OAuth clients.
type OAuthClientList struct {
	unversioned.TypeMeta
	unversioned.ListMeta
	// Items is the list of OAuth clients.
	Items []OAuthClient
}

// OAuthClientAuthorizationList is a collection of OAuth client authorizations.
type OAuthClientAuthorizationList struct {
	unversioned.TypeMeta
	unversioned.ListMeta
	// Items is the list of OAuth client authorizations.
	Items []OAuthClientAuthorization
}

View File

@ -0,0 +1,91 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package api
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
)
// init registers the generated deep copy functions with the global scheme at
// package load time. This file is generated by deepcopy-gen; regenerate rather
// than editing the copy logic by hand.
func init() {
	if err := api.Scheme.AddGeneratedDeepCopyFuncs(
		DeepCopy_api_Project,
		DeepCopy_api_ProjectList,
		DeepCopy_api_ProjectRequest,
		DeepCopy_api_ProjectSpec,
		DeepCopy_api_ProjectStatus,
	); err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}

// DeepCopy_api_Project deep-copies in into out, including metadata, spec and
// status.
func DeepCopy_api_Project(in Project, out *Project, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_ProjectSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := DeepCopy_api_ProjectStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_api_ProjectList deep-copies in into out, element-wise copying Items.
func DeepCopy_api_ProjectList(in ProjectList, out *ProjectList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]Project, len(in))
		for i := range in {
			if err := DeepCopy_api_Project(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_ProjectRequest deep-copies in into out.
func DeepCopy_api_ProjectRequest(in ProjectRequest, out *ProjectRequest, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	out.DisplayName = in.DisplayName
	out.Description = in.Description
	return nil
}

// DeepCopy_api_ProjectSpec deep-copies in into out, cloning the Finalizers
// slice (nil stays nil).
func DeepCopy_api_ProjectSpec(in ProjectSpec, out *ProjectSpec, c *conversion.Cloner) error {
	if in.Finalizers != nil {
		in, out := in.Finalizers, &out.Finalizers
		*out = make([]api.FinalizerName, len(in))
		for i := range in {
			(*out)[i] = in[i]
		}
	} else {
		out.Finalizers = nil
	}
	return nil
}

// DeepCopy_api_ProjectStatus copies the scalar status fields of in into out.
func DeepCopy_api_ProjectStatus(in ProjectStatus, out *ProjectStatus, c *conversion.Cloner) error {
	out.Phase = in.Phase
	return nil
}

View File

@ -0,0 +1,23 @@
package api
import (
"fmt"
)
const (
	// displayNameOldAnnotation is the legacy annotation key for a project's display name.
	displayNameOldAnnotation = "displayName"
	// displayNameAnnotation is the current annotation key for a project's display name.
	displayNameAnnotation = "openshift.io/display-name"
)

// DisplayNameAndNameForProject returns a formatted string containing the name
// of the project and includes the display name if it differs.
func DisplayNameAndNameForProject(project *Project) string {
	name := project.Name
	// Prefer the current annotation, falling back to the legacy key.
	display := project.Annotations[displayNameAnnotation]
	if display == "" {
		display = project.Annotations[displayNameOldAnnotation]
	}
	if display == "" || display == name {
		return name
	}
	return fmt.Sprintf("%s (%s)", display, name)
}

View File

@ -0,0 +1,39 @@
package api
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
// GroupName is the API group for these objects; the internal version lives in
// the empty (legacy) group.
const GroupName = ""

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) unversioned.GroupKind {
	gvk := SchemeGroupVersion.WithKind(kind)
	return gvk.GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource
func Resource(resource string) unversioned.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}

// AddToScheme registers the project API types with the given scheme.
func AddToScheme(scheme *runtime.Scheme) {
	// Add the API to Scheme.
	addKnownTypes(scheme)
}

// addKnownTypes adds the list of known types to the scheme.
func addKnownTypes(scheme *runtime.Scheme) {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Project{},
		&ProjectList{},
		&ProjectRequest{},
	)
}

// GetObjectKind returns the object's type metadata.
func (obj *ProjectRequest) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *Project) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

// GetObjectKind returns the object's type metadata.
func (obj *ProjectList) GetObjectKind() unversioned.ObjectKind {
	return &obj.TypeMeta
}

View File

@ -0,0 +1,61 @@
package api
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
)
// ProjectList is a list of Project objects.
type ProjectList struct {
	unversioned.TypeMeta
	unversioned.ListMeta
	// Items is the list of projects.
	Items []Project
}

const (
	// These are internal finalizer values to Origin
	FinalizerOrigin kapi.FinalizerName = "openshift.io/origin"
)

// ProjectSpec describes the attributes on a Project
type ProjectSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
	Finalizers []kapi.FinalizerName
}

// ProjectStatus is information about the current status of a Project
type ProjectStatus struct {
	// Phase is the current lifecycle phase of the project.
	Phase kapi.NamespacePhase
}

// +genclient=true
// Project is a logical top-level container for a set of origin resources
type Project struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// Spec describes the attributes of the project.
	Spec ProjectSpec
	// Status holds the current status of the project.
	Status ProjectStatus
}

// ProjectRequest is the input used to request creation of a new project.
type ProjectRequest struct {
	unversioned.TypeMeta
	kapi.ObjectMeta
	// DisplayName is the display name to apply to the project.
	DisplayName string
	// Description is the description to apply to the project.
	Description string
}

// These constants represent annotations keys affixed to projects
const (
	// ProjectDisplayName is an annotation that stores the name displayed when querying for projects
	ProjectDisplayName = "openshift.io/display-name"
	// ProjectDescription is an annotation that holds the description of the project
	ProjectDescription = "openshift.io/description"
	// ProjectNodeSelector is an annotation that holds the node selector;
	// the node selector annotation determines which nodes will have pods from this project scheduled to them
	ProjectNodeSelector = "openshift.io/node-selector"
	// ProjectRequester is the username that requested a given project. It's not guaranteed to be present,
	// but it is set by the default project template.
	ProjectRequester = "openshift.io/requester"
)

View File

@ -0,0 +1,190 @@
// +build !ignore_autogenerated_openshift
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package api
import (
api "k8s.io/kubernetes/pkg/api"
unversioned "k8s.io/kubernetes/pkg/api/unversioned"
conversion "k8s.io/kubernetes/pkg/conversion"
intstr "k8s.io/kubernetes/pkg/util/intstr"
)
// init registers the generated deep copy functions with the global scheme at
// package load time. This file is generated by deepcopy-gen; regenerate rather
// than editing the copy logic by hand.
func init() {
	if err := api.Scheme.AddGeneratedDeepCopyFuncs(
		DeepCopy_api_Route,
		DeepCopy_api_RouteIngress,
		DeepCopy_api_RouteIngressCondition,
		DeepCopy_api_RouteList,
		DeepCopy_api_RoutePort,
		DeepCopy_api_RouteSpec,
		DeepCopy_api_RouteStatus,
		DeepCopy_api_RouteTargetReference,
		DeepCopy_api_RouterShard,
		DeepCopy_api_TLSConfig,
	); err != nil {
		// if one of the deep copy functions is malformed, detect it immediately.
		panic(err)
	}
}

// DeepCopy_api_Route deep-copies in into out, including metadata, spec and
// status.
func DeepCopy_api_Route(in Route, out *Route, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
		return err
	}
	if err := DeepCopy_api_RouteSpec(in.Spec, &out.Spec, c); err != nil {
		return err
	}
	if err := DeepCopy_api_RouteStatus(in.Status, &out.Status, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_api_RouteIngress deep-copies in into out, element-wise copying
// Conditions.
func DeepCopy_api_RouteIngress(in RouteIngress, out *RouteIngress, c *conversion.Cloner) error {
	out.Host = in.Host
	out.RouterName = in.RouterName
	if in.Conditions != nil {
		in, out := in.Conditions, &out.Conditions
		*out = make([]RouteIngressCondition, len(in))
		for i := range in {
			if err := DeepCopy_api_RouteIngressCondition(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Conditions = nil
	}
	return nil
}

// DeepCopy_api_RouteIngressCondition deep-copies in into out, allocating a
// fresh LastTransitionTime when present.
func DeepCopy_api_RouteIngressCondition(in RouteIngressCondition, out *RouteIngressCondition, c *conversion.Cloner) error {
	out.Type = in.Type
	out.Status = in.Status
	out.Reason = in.Reason
	out.Message = in.Message
	if in.LastTransitionTime != nil {
		in, out := in.LastTransitionTime, &out.LastTransitionTime
		*out = new(unversioned.Time)
		if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.LastTransitionTime = nil
	}
	return nil
}

// DeepCopy_api_RouteList deep-copies in into out, element-wise copying Items.
func DeepCopy_api_RouteList(in RouteList, out *RouteList, c *conversion.Cloner) error {
	if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
		return err
	}
	if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
		return err
	}
	if in.Items != nil {
		in, out := in.Items, &out.Items
		*out = make([]Route, len(in))
		for i := range in {
			if err := DeepCopy_api_Route(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Items = nil
	}
	return nil
}

// DeepCopy_api_RoutePort deep-copies the TargetPort of in into out.
func DeepCopy_api_RoutePort(in RoutePort, out *RoutePort, c *conversion.Cloner) error {
	if err := intstr.DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil {
		return err
	}
	return nil
}

// DeepCopy_api_RouteSpec deep-copies in into out, including the To reference,
// AlternateBackends, and the optional Port and TLS sections.
func DeepCopy_api_RouteSpec(in RouteSpec, out *RouteSpec, c *conversion.Cloner) error {
	out.Host = in.Host
	out.Path = in.Path
	if err := DeepCopy_api_RouteTargetReference(in.To, &out.To, c); err != nil {
		return err
	}
	if in.AlternateBackends != nil {
		in, out := in.AlternateBackends, &out.AlternateBackends
		*out = make([]RouteTargetReference, len(in))
		for i := range in {
			if err := DeepCopy_api_RouteTargetReference(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.AlternateBackends = nil
	}
	if in.Port != nil {
		in, out := in.Port, &out.Port
		*out = new(RoutePort)
		if err := DeepCopy_api_RoutePort(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.Port = nil
	}
	if in.TLS != nil {
		in, out := in.TLS, &out.TLS
		*out = new(TLSConfig)
		if err := DeepCopy_api_TLSConfig(*in, *out, c); err != nil {
			return err
		}
	} else {
		out.TLS = nil
	}
	return nil
}

// DeepCopy_api_RouteStatus deep-copies in into out, element-wise copying
// Ingress.
func DeepCopy_api_RouteStatus(in RouteStatus, out *RouteStatus, c *conversion.Cloner) error {
	if in.Ingress != nil {
		in, out := in.Ingress, &out.Ingress
		*out = make([]RouteIngress, len(in))
		for i := range in {
			if err := DeepCopy_api_RouteIngress(in[i], &(*out)[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Ingress = nil
	}
	return nil
}

// DeepCopy_api_RouteTargetReference deep-copies in into out, allocating a
// fresh Weight pointer when present.
func DeepCopy_api_RouteTargetReference(in RouteTargetReference, out *RouteTargetReference, c *conversion.Cloner) error {
	out.Kind = in.Kind
	out.Name = in.Name
	if in.Weight != nil {
		in, out := in.Weight, &out.Weight
		*out = new(int32)
		**out = *in
	} else {
		out.Weight = nil
	}
	return nil
}

// DeepCopy_api_RouterShard copies the scalar fields of in into out.
func DeepCopy_api_RouterShard(in RouterShard, out *RouterShard, c *conversion.Cloner) error {
	out.ShardName = in.ShardName
	out.DNSSuffix = in.DNSSuffix
	return nil
}

// DeepCopy_api_TLSConfig copies the scalar TLS fields of in into out.
func DeepCopy_api_TLSConfig(in TLSConfig, out *TLSConfig, c *conversion.Cloner) error {
	out.Termination = in.Termination
	out.Certificate = in.Certificate
	out.Key = in.Key
	out.CACertificate = in.CACertificate
	out.DestinationCACertificate = in.DestinationCACertificate
	out.InsecureEdgeTerminationPolicy = in.InsecureEdgeTerminationPolicy
	return nil
}

Some files were not shown because too many files have changed in this diff Show More