From fe9713f26e8c3033a0be0158e1fa71c93de44c25 Mon Sep 17 00:00:00 2001
From: Tuna
Date: Thu, 4 Aug 2016 19:13:35 +0700
Subject: [PATCH] update vendoring

---
 Godeps/Godeps.json | 571 +++- vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/exampledata.txt | 2388 +++++++++++++ .../beorn7/perks/quantile/stream.go | 292 ++ vendor/github.com/coreos/go-oidc/LICENSE | 202 ++ vendor/github.com/coreos/go-oidc/NOTICE | 5 + .../github.com/coreos/go-oidc/http/client.go | 7 + vendor/github.com/coreos/go-oidc/http/http.go | 159 + .../coreos/go-oidc/http/middleware.go | 14 + vendor/github.com/coreos/go-oidc/http/url.go | 29 + .../github.com/coreos/go-oidc/jose/claims.go | 126 + vendor/github.com/coreos/go-oidc/jose/jose.go | 112 + vendor/github.com/coreos/go-oidc/jose/jwk.go | 135 + vendor/github.com/coreos/go-oidc/jose/jws.go | 51 + vendor/github.com/coreos/go-oidc/jose/jwt.go | 82 + vendor/github.com/coreos/go-oidc/jose/sig.go | 24 + .../coreos/go-oidc/jose/sig_hmac.go | 67 + .../github.com/coreos/go-oidc/jose/sig_rsa.go | 67 + vendor/github.com/coreos/go-oidc/key/key.go | 153 + .../github.com/coreos/go-oidc/key/manager.go | 99 + vendor/github.com/coreos/go-oidc/key/repo.go | 55 + .../github.com/coreos/go-oidc/key/rotate.go | 165 + vendor/github.com/coreos/go-oidc/key/sync.go | 91 + .../github.com/coreos/go-oidc/oauth2/error.go | 29 + .../coreos/go-oidc/oauth2/oauth2.go | 416 +++ .../github.com/coreos/go-oidc/oidc/client.go | 846 +++++ .../coreos/go-oidc/oidc/identity.go | 44 + .../coreos/go-oidc/oidc/interface.go | 3 + vendor/github.com/coreos/go-oidc/oidc/key.go | 67 + .../coreos/go-oidc/oidc/provider.go | 688 ++++ .../coreos/go-oidc/oidc/transport.go | 88 + vendor/github.com/coreos/go-oidc/oidc/util.go | 109 + .../coreos/go-oidc/oidc/verification.go | 188 + vendor/github.com/coreos/go-systemd/LICENSE | 191 ++ .../coreos/go-systemd/journal/journal.go | 179 + vendor/github.com/coreos/pkg/LICENSE | 202 ++ vendor/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 240 ++ .../coreos/pkg/capnslog/pkg_logger.go | 177 + .../coreos/pkg/capnslog/syslog_formatter.go | 65 + vendor/github.com/coreos/pkg/health/README.md | 11 + vendor/github.com/coreos/pkg/health/health.go | 127 + .../github.com/coreos/pkg/httputil/README.md | 13 + .../github.com/coreos/pkg/httputil/cookie.go | 21 + vendor/github.com/coreos/pkg/httputil/json.go | 27 + .../github.com/coreos/pkg/timeutil/backoff.go | 15 + .../github.com/evanphx/json-patch/.travis.yml | 14 + vendor/github.com/evanphx/json-patch/LICENSE | 25 + .../github.com/evanphx/json-patch/README.md | 29 + vendor/github.com/evanphx/json-patch/merge.go | 306 ++ vendor/github.com/evanphx/json-patch/patch.go | 579 ++++ vendor/github.com/golang/groupcache/LICENSE | 191 ++ .../github.com/golang/groupcache/lru/lru.go | 121 + vendor/github.com/golang/protobuf/LICENSE | 31 + .../github.com/golang/protobuf/proto/Makefile | 43 + .../github.com/golang/protobuf/proto/clone.go | 223 ++ .../golang/protobuf/proto/decode.go | 867 +++++ .../golang/protobuf/proto/encode.go | 1325 ++++++++ .../github.com/golang/protobuf/proto/equal.go | 276 ++ .../golang/protobuf/proto/extensions.go | 399 +++ 
.../github.com/golang/protobuf/proto/lib.go | 894 +++++ .../golang/protobuf/proto/message_set.go | 280 ++ .../golang/protobuf/proto/pointer_reflect.go | 479 +++ .../golang/protobuf/proto/pointer_unsafe.go | 266 ++ .../golang/protobuf/proto/properties.go | 842 +++++ .../github.com/golang/protobuf/proto/text.go | 751 ++++ .../golang/protobuf/proto/text_parser.go | 806 +++++ vendor/github.com/google/cadvisor/LICENSE | 190 ++ .../google/cadvisor/info/v1/container.go | 583 ++++ .../google/cadvisor/info/v1/docker.go | 37 + .../google/cadvisor/info/v1/machine.go | 205 ++ .../google/cadvisor/info/v1/metric.go | 79 + vendor/github.com/imdario/mergo/.travis.yml | 2 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 68 + vendor/github.com/imdario/mergo/doc.go | 44 + vendor/github.com/imdario/mergo/map.go | 146 + vendor/github.com/imdario/mergo/merge.go | 99 + vendor/github.com/imdario/mergo/mergo.go | 90 + .../inconshreveable/mousetrap/LICENSE | 13 + .../inconshreveable/mousetrap/README.md | 23 + .../inconshreveable/mousetrap/trap_others.go | 15 + .../inconshreveable/mousetrap/trap_windows.go | 98 + .../mousetrap/trap_windows_1.4.go | 46 + .../github.com/jonboulle/clockwork/.gitignore | 25 + .../jonboulle/clockwork/.travis.yml | 3 + vendor/github.com/jonboulle/clockwork/LICENSE | 201 ++ .../github.com/jonboulle/clockwork/README.md | 61 + .../jonboulle/clockwork/clockwork.go | 164 + vendor/github.com/juju/ratelimit/LICENSE | 191 ++ vendor/github.com/juju/ratelimit/README.md | 117 + vendor/github.com/juju/ratelimit/ratelimit.go | 245 ++ vendor/github.com/juju/ratelimit/reader.go | 51 + .../golang_protobuf_extensions/LICENSE | 201 ++ .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../prometheus/client_golang/LICENSE | 201 ++ .../prometheus/client_golang/NOTICE | 28 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 53 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 175 + .../client_golang/prometheus/desc.go | 201 ++ .../client_golang/prometheus/doc.go | 109 + .../client_golang/prometheus/expvar.go | 119 + .../client_golang/prometheus/gauge.go | 147 + .../client_golang/prometheus/go_collector.go | 50 + .../client_golang/prometheus/histogram.go | 450 +++ .../client_golang/prometheus/http.go | 361 ++ .../client_golang/prometheus/metric.go | 166 + .../prometheus/process_collector.go | 142 + .../client_golang/prometheus/push.go | 65 + .../client_golang/prometheus/registry.go | 726 ++++ .../client_golang/prometheus/summary.go | 540 +++ .../client_golang/prometheus/untyped.go | 145 + .../client_golang/prometheus/value.go | 234 ++ .../client_golang/prometheus/vec.go | 247 ++ .../prometheus/client_model/LICENSE | 201 ++ .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 364 ++ vendor/github.com/prometheus/common/LICENSE | 201 ++ vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 412 +++ .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 40 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 300 ++ .../prometheus/common/expfmt/text_parse.go | 753 ++++ .../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + 
.../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 206 ++ .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 98 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 249 ++ .../prometheus/common/model/value.go | 403 +++ .../github.com/prometheus/procfs/.travis.yml | 5 + .../github.com/prometheus/procfs/AUTHORS.md | 20 + .../prometheus/procfs/CONTRIBUTING.md | 18 + vendor/github.com/prometheus/procfs/LICENSE | 201 ++ vendor/github.com/prometheus/procfs/Makefile | 6 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 10 + vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 33 + vendor/github.com/prometheus/procfs/ipvs.go | 224 ++ vendor/github.com/prometheus/procfs/mdstat.go | 138 + vendor/github.com/prometheus/procfs/proc.go | 212 ++ .../github.com/prometheus/procfs/proc_io.go | 55 + .../prometheus/procfs/proc_limits.go | 137 + .../github.com/prometheus/procfs/proc_stat.go | 175 + vendor/github.com/prometheus/procfs/stat.go | 56 + vendor/github.com/spf13/cobra/.gitignore | 24 + vendor/github.com/spf13/cobra/.mailmap | 3 + vendor/github.com/spf13/cobra/.travis.yml | 13 + vendor/github.com/spf13/cobra/LICENSE.txt | 174 + vendor/github.com/spf13/cobra/README.md | 877 +++++ .../spf13/cobra/bash_completions.go | 602 ++++ .../spf13/cobra/bash_completions.md | 206 ++ vendor/github.com/spf13/cobra/cobra.go | 171 + vendor/github.com/spf13/cobra/command.go | 1193 +++++++ .../github.com/spf13/cobra/command_notwin.go | 5 + vendor/github.com/spf13/cobra/command_win.go | 26 + .../x/net/context/ctxhttp/cancelreq.go | 19 + .../x/net/context/ctxhttp/cancelreq_go14.go | 23 + .../x/net/context/ctxhttp/ctxhttp.go | 140 + vendor/golang.org/x/oauth2/.travis.yml | 14 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 64 + .../golang.org/x/oauth2/client_appengine.go | 25 + .../golang.org/x/oauth2/google/appengine.go | 83 + .../x/oauth2/google/appengine_hook.go | 13 + vendor/golang.org/x/oauth2/google/default.go | 154 + vendor/golang.org/x/oauth2/google/google.go | 145 + vendor/golang.org/x/oauth2/google/sdk.go | 168 + vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + vendor/golang.org/x/oauth2/internal/token.go | 213 ++ .../golang.org/x/oauth2/internal/transport.go | 67 + vendor/golang.org/x/oauth2/jws/jws.go | 160 + vendor/golang.org/x/oauth2/jwt/jwt.go | 147 + vendor/golang.org/x/oauth2/oauth2.go | 325 ++ vendor/golang.org/x/oauth2/token.go | 143 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/google.golang.org/cloud/LICENSE | 202 ++ .../cloud/compute/metadata/metadata.go | 382 +++ .../google.golang.org/cloud/internal/cloud.go | 128 + .../apis/federation/deep_copy_generated.go | 145 + .../apis/federation/install/install.go | 129 + .../federation/apis/federation/register.go | 56 + .../federation/apis/federation/types.go | 109 + .../apis/federation/v1beta1/conversion.go | 40 + .../v1beta1/conversion_generated.go | 296 ++ .../federation/v1beta1/deep_copy_generated.go | 146 + .../apis/federation/v1beta1/defaults.go | 24 + .../federation/apis/federation/v1beta1/doc.go | 18 + .../apis/federation/v1beta1/generated.pb.go | 1398 ++++++++ 
.../apis/federation/v1beta1/generated.proto | 114 + .../apis/federation/v1beta1/register.go | 50 + .../apis/federation/v1beta1/types.go | 109 + .../federation_internalclientset/clientset.go | 107 + .../federation_internalclientset/doc.go | 20 + .../import_known_versions.go | 25 + .../typed/core/unversioned/core_client.go | 101 + .../typed/core/unversioned/doc.go | 20 + .../core/unversioned/generated_expansion.go | 19 + .../typed/core/unversioned/service.go | 149 + .../typed/federation/unversioned/cluster.go | 140 + .../typed/federation/unversioned/doc.go | 20 + .../unversioned/federation_client.go | 101 + .../unversioned/generated_expansion.go | 19 + .../pkg/api/annotations/annotations.go | 23 + .../kubernetes/pkg/api/annotations/doc.go | 18 + .../k8s.io/kubernetes/pkg/api/rest/create.go | 126 + .../k8s.io/kubernetes/pkg/api/rest/delete.go | 93 + vendor/k8s.io/kubernetes/pkg/api/rest/doc.go | 18 + .../k8s.io/kubernetes/pkg/api/rest/export.go | 28 + vendor/k8s.io/kubernetes/pkg/api/rest/rest.go | 306 ++ .../k8s.io/kubernetes/pkg/api/rest/types.go | 42 + .../k8s.io/kubernetes/pkg/api/rest/update.go | 175 + .../pkg/apis/apps/deep_copy_generated.go | 117 + .../pkg/apis/apps/install/install.go | 125 + .../kubernetes/pkg/apis/apps/register.go | 54 + .../k8s.io/kubernetes/pkg/apis/apps/types.go | 94 + .../pkg/apis/apps/v1alpha1/conversion.go | 118 + .../apps/v1alpha1/conversion_generated.go | 156 + .../apis/apps/v1alpha1/deep_copy_generated.go | 124 + .../pkg/apis/apps/v1alpha1/defaults.go | 46 + .../kubernetes/pkg/apis/apps/v1alpha1/doc.go | 18 + .../pkg/apis/apps/v1alpha1/generated.pb.go | 969 ++++++ .../pkg/apis/apps/v1alpha1/generated.proto | 101 + .../pkg/apis/apps/v1alpha1/register.go | 50 + .../pkg/apis/apps/v1alpha1/types.go | 94 + .../v1alpha1/types_swagger_doc_generated.go | 71 + .../deep_copy_generated.go | 91 + .../authentication.k8s.io/install/install.go | 123 + .../apis/authentication.k8s.io/register.go | 50 + .../pkg/apis/authentication.k8s.io/types.go | 61 + .../v1beta1/conversion.go | 30 + .../v1beta1/conversion_generated.go | 143 + .../v1beta1/deep_copy_generated.go | 91 + .../authentication.k8s.io/v1beta1/defaults.go | 25 + .../apis/authentication.k8s.io/v1beta1/doc.go | 18 + .../authentication.k8s.io/v1beta1/register.go | 44 + .../authentication.k8s.io/v1beta1/types.go | 63 + .../apis/authorization/deep_copy_generated.go | 170 + .../pkg/apis/authorization/install/install.go | 128 + .../pkg/apis/authorization/register.go | 50 + .../pkg/apis/authorization/types.go | 124 + .../apis/authorization/v1beta1/conversion.go | 30 + .../v1beta1/conversion_generated.go | 333 ++ .../v1beta1/deep_copy_generated.go | 170 + .../apis/authorization/v1beta1/defaults.go | 25 + .../pkg/apis/authorization/v1beta1/doc.go | 18 + .../apis/authorization/v1beta1/register.go | 48 + .../pkg/apis/authorization/v1beta1/types.go | 123 + .../v1beta1/types_swagger_doc_generated.go | 118 + .../pkg/apis/autoscaling/install/install.go | 129 + .../autoscaling/v1/conversion_generated.go | 300 ++ .../autoscaling/v1/deep_copy_generated.go | 166 + .../pkg/apis/autoscaling/v1/defaults.go | 34 + .../kubernetes/pkg/apis/autoscaling/v1/doc.go | 18 + .../pkg/apis/autoscaling/v1/generated.pb.go | 1612 +++++++++ .../pkg/apis/autoscaling/v1/generated.proto | 130 + .../pkg/apis/autoscaling/v1/register.go | 47 + .../pkg/apis/autoscaling/v1/types.go | 122 + .../v1/types_swagger_doc_generated.go | 117 + .../pkg/apis/batch/install/install.go | 140 + .../pkg/apis/batch/v1/conversion.go | 106 + 
.../pkg/apis/batch/v1/conversion_generated.go | 330 ++ .../pkg/apis/batch/v1/deep_copy_generated.go | 211 ++ .../kubernetes/pkg/apis/batch/v1/defaults.go | 42 + .../kubernetes/pkg/apis/batch/v1/doc.go | 18 + .../pkg/apis/batch/v1/generated.pb.go | 1901 +++++++++++ .../pkg/apis/batch/v1/generated.proto | 176 + .../kubernetes/pkg/apis/batch/v1/register.go | 47 + .../kubernetes/pkg/apis/batch/v1/types.go | 186 + .../batch/v1/types_swagger_doc_generated.go | 114 + .../pkg/apis/batch/v2alpha1/conversion.go | 116 + .../batch/v2alpha1/conversion_generated.go | 573 ++++ .../batch/v2alpha1/deep_copy_generated.go | 324 ++ .../pkg/apis/batch/v2alpha1/defaults.go | 49 + .../kubernetes/pkg/apis/batch/v2alpha1/doc.go | 18 + .../pkg/apis/batch/v2alpha1/generated.pb.go | 3018 +++++++++++++++++ .../pkg/apis/batch/v2alpha1/generated.proto | 253 ++ .../pkg/apis/batch/v2alpha1/register.go | 50 + .../pkg/apis/batch/v2alpha1/types.go | 283 ++ .../v2alpha1/types_swagger_doc_generated.go | 178 + .../componentconfig/deep_copy_generated.go | 374 ++ .../pkg/apis/componentconfig/helpers.go | 97 + .../apis/componentconfig/install/install.go | 129 + .../pkg/apis/componentconfig/register.go | 50 + .../pkg/apis/componentconfig/types.go | 621 ++++ .../v1alpha1/conversion_generated.go | 182 + .../v1alpha1/deep_copy_generated.go | 127 + .../apis/componentconfig/v1alpha1/defaults.go | 114 + .../pkg/apis/componentconfig/v1alpha1/doc.go | 18 + .../apis/componentconfig/v1alpha1/register.go | 40 + .../apis/componentconfig/v1alpha1/types.go | 141 + .../apis/extensions/validation/validation.go | 746 ++++ .../pkg/apis/policy/deep_copy_generated.go | 101 + .../pkg/apis/policy/install/install.go | 129 + .../kubernetes/pkg/apis/policy/register.go | 52 + .../kubernetes/pkg/apis/policy/types.go | 70 + .../policy/v1alpha1/conversion_generated.go | 183 + .../policy/v1alpha1/deep_copy_generated.go | 102 + .../pkg/apis/policy/v1alpha1/doc.go | 21 + .../pkg/apis/policy/v1alpha1/generated.pb.go | 903 +++++ .../pkg/apis/policy/v1alpha1/generated.proto | 76 + .../pkg/apis/policy/v1alpha1/register.go | 50 + .../pkg/apis/policy/v1alpha1/types.go | 71 + .../v1alpha1/types_swagger_doc_generated.go | 70 + .../pkg/apis/rbac/deep_copy_generated.go | 274 ++ vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go | 18 + .../pkg/apis/rbac/install/install.go | 130 + .../kubernetes/pkg/apis/rbac/register.go | 64 + .../k8s.io/kubernetes/pkg/apis/rbac/types.go | 176 + .../rbac/v1alpha1/conversion_generated.go | 536 +++ .../apis/rbac/v1alpha1/deep_copy_generated.go | 271 ++ .../kubernetes/pkg/apis/rbac/v1alpha1/doc.go | 19 + .../pkg/apis/rbac/v1alpha1/generated.pb.go | 2209 ++++++++++++ .../pkg/apis/rbac/v1alpha1/generated.proto | 158 + .../pkg/apis/rbac/v1alpha1/register.go | 52 + .../pkg/apis/rbac/v1alpha1/types.go | 163 + .../v1alpha1/types_swagger_doc_generated.go | 138 + .../kubernetes/pkg/client/cache/delta_fifo.go | 600 ++++ .../k8s.io/kubernetes/pkg/client/cache/doc.go | 24 + .../pkg/client/cache/expiration_cache.go | 208 ++ .../client/cache/expiration_cache_fakes.go | 54 + .../pkg/client/cache/fake_custom_store.go | 102 + .../kubernetes/pkg/client/cache/fifo.go | 294 ++ .../kubernetes/pkg/client/cache/index.go | 82 + .../kubernetes/pkg/client/cache/listers.go | 672 ++++ .../kubernetes/pkg/client/cache/listwatch.go | 86 + .../kubernetes/pkg/client/cache/reflector.go | 423 +++ .../kubernetes/pkg/client/cache/store.go | 240 ++ .../pkg/client/cache/thread_safe_store.go | 287 ++ .../pkg/client/cache/undelta_store.go | 83 + .../internalclientset/clientset.go | 158 + 
.../internalclientset/doc.go | 20 + .../import_known_versions.go | 39 + .../unversioned/autoscaling_client.go | 101 + .../typed/autoscaling/unversioned/doc.go | 20 + .../unversioned/generated_expansion.go | 19 + .../unversioned/horizontalpodautoscaler.go | 150 + .../typed/batch/unversioned/batch_client.go | 106 + .../typed/batch/unversioned/doc.go | 20 + .../batch/unversioned/generated_expansion.go | 21 + .../typed/batch/unversioned/job.go | 150 + .../typed/batch/unversioned/scheduledjob.go | 150 + .../typed/core/unversioned/componentstatus.go | 126 + .../typed/core/unversioned/configmap.go | 135 + .../typed/core/unversioned/core_client.go | 181 + .../typed/core/unversioned/doc.go | 20 + .../typed/core/unversioned/endpoints.go | 135 + .../typed/core/unversioned/event.go | 135 + .../typed/core/unversioned/event_expansion.go | 157 + .../core/unversioned/generated_expansion.go | 39 + .../typed/core/unversioned/limitrange.go | 135 + .../typed/core/unversioned/namespace.go | 139 + .../core/unversioned/namespace_expansion.go | 31 + .../typed/core/unversioned/node.go | 139 + .../typed/core/unversioned/node_expansion.go | 40 + .../core/unversioned/persistentvolume.go | 139 + .../core/unversioned/persistentvolumeclaim.go | 149 + .../typed/core/unversioned/pod.go | 149 + .../typed/core/unversioned/pod_expansion.go | 38 + .../typed/core/unversioned/podtemplate.go | 135 + .../core/unversioned/replicationcontroller.go | 149 + .../typed/core/unversioned/resourcequota.go | 149 + .../typed/core/unversioned/secret.go | 135 + .../unversioned/securitycontextconstraints.go | 126 + .../securitycontextconstraints_expansion.go | 41 + .../typed/core/unversioned/service.go | 149 + .../core/unversioned/service_expansion.go | 41 + .../typed/core/unversioned/serviceaccount.go | 135 + .../typed/extensions/unversioned/daemonset.go | 150 + .../extensions/unversioned/deployment.go | 150 + .../unversioned/deployment_expansion.go | 29 + .../typed/extensions/unversioned/doc.go | 20 + .../unversioned/extensions_client.go | 131 + .../unversioned/generated_expansion.go | 31 + .../typed/extensions/unversioned/ingress.go | 150 + .../typed/extensions/unversioned/job.go | 150 + .../unversioned/podsecuritypolicy.go | 127 + .../extensions/unversioned/replicaset.go | 150 + .../typed/extensions/unversioned/scale.go | 42 + .../extensions/unversioned/scale_expansion.go | 65 + .../unversioned/thirdpartyresource.go | 127 + .../typed/rbac/unversioned/clusterrole.go | 127 + .../rbac/unversioned/clusterrolebinding.go | 127 + .../typed/rbac/unversioned/doc.go | 20 + .../rbac/unversioned/generated_expansion.go | 25 + .../typed/rbac/unversioned/rbac_client.go | 116 + .../typed/rbac/unversioned/role.go | 136 + .../typed/rbac/unversioned/rolebinding.go | 136 + .../kubernetes/pkg/client/metrics/metrics.go | 67 + .../kubernetes/pkg/client/record/doc.go | 18 + .../kubernetes/pkg/client/record/event.go | 315 ++ .../pkg/client/record/events_cache.go | 360 ++ .../kubernetes/pkg/client/record/fake.go | 54 + .../pkg/client/restclient/client.go | 228 ++ .../pkg/client/restclient/config.go | 328 ++ .../pkg/client/restclient/plugin.go | 73 + .../pkg/client/restclient/request.go | 1086 ++++++ .../pkg/client/restclient/transport.go | 94 + .../pkg/client/restclient/url_utils.go | 93 + .../pkg/client/restclient/urlbackoff.go | 107 + .../pkg/client/restclient/versions.go | 88 + .../kubernetes/pkg/client/transport/cache.go | 88 + .../kubernetes/pkg/client/transport/config.go | 84 + .../pkg/client/transport/round_trippers.go | 337 ++ 
.../pkg/client/transport/transport.go | 140 + .../typed/discovery/discovery_client.go | 317 ++ .../internalclientset/clientset_adaption.go | 61 + .../kubernetes/pkg/client/unversioned/apps.go | 83 + .../pkg/client/unversioned/auth/clientauth.go | 125 + .../pkg/client/unversioned/autoscaling.go | 84 + .../pkg/client/unversioned/batch.go | 114 + .../pkg/client/unversioned/client.go | 179 + .../unversioned/clientcmd/api/helpers.go | 183 + .../clientcmd/api/latest/latest.go | 54 + .../unversioned/clientcmd/api/register.go | 43 + .../client/unversioned/clientcmd/api/types.go | 152 + .../clientcmd/api/v1/conversion.go | 231 ++ .../unversioned/clientcmd/api/v1/register.go | 40 + .../unversioned/clientcmd/api/v1/types.go | 145 + .../unversioned/clientcmd/auth_loaders.go | 91 + .../unversioned/clientcmd/client_config.go | 411 +++ .../client/unversioned/clientcmd/config.go | 419 +++ .../pkg/client/unversioned/clientcmd/doc.go | 37 + .../client/unversioned/clientcmd/loader.go | 580 ++++ .../clientcmd/merged_client_builder.go | 122 + .../client/unversioned/clientcmd/overrides.go | 198 ++ .../unversioned/clientcmd/validation.go | 270 ++ .../client/unversioned/clusterrolebindings.go | 92 + .../pkg/client/unversioned/clusterroles.go | 92 + .../client/unversioned/componentstatuses.go | 60 + .../pkg/client/unversioned/conditions.go | 240 ++ .../pkg/client/unversioned/configmap.go | 122 + .../pkg/client/unversioned/containerinfo.go | 123 + .../pkg/client/unversioned/daemon_sets.go | 100 + .../pkg/client/unversioned/deployment.go | 111 + .../kubernetes/pkg/client/unversioned/doc.go | 57 + .../pkg/client/unversioned/endpoints.go | 101 + .../pkg/client/unversioned/events.go | 219 ++ .../pkg/client/unversioned/extensions.go | 138 + .../pkg/client/unversioned/flags.go | 31 + .../pkg/client/unversioned/helper.go | 267 ++ .../unversioned/horizontalpodautoscaler.go | 103 + .../unversioned/import_known_versions.go | 40 + .../pkg/client/unversioned/ingress.go | 100 + .../kubernetes/pkg/client/unversioned/jobs.go | 167 + .../pkg/client/unversioned/limit_ranges.go | 94 + .../pkg/client/unversioned/namespaces.go | 116 + .../pkg/client/unversioned/network_policys.go | 92 + .../pkg/client/unversioned/nodes.go | 100 + .../unversioned/persistentvolumeclaim.go | 99 + .../client/unversioned/persistentvolumes.go | 93 + .../pkg/client/unversioned/pet_sets.go | 100 + .../unversioned/pod_disruption_budgets.go | 100 + .../pkg/client/unversioned/pod_templates.go | 94 + .../kubernetes/pkg/client/unversioned/pods.go | 115 + .../client/unversioned/podsecuritypolicy.go | 111 + .../pkg/client/unversioned/policy.go | 83 + .../kubernetes/pkg/client/unversioned/rbac.go | 103 + .../pkg/client/unversioned/replica_sets.go | 100 + .../unversioned/replication_controllers.go | 99 + .../pkg/client/unversioned/resource_quotas.go | 102 + .../pkg/client/unversioned/rolebindings.go | 95 + .../pkg/client/unversioned/roles.go | 95 + .../pkg/client/unversioned/scale.go | 77 + .../pkg/client/unversioned/scheduledjobs.go | 103 + .../pkg/client/unversioned/secrets.go | 120 + .../unversioned/securitycontextconstraints.go | 110 + .../client/unversioned/service_accounts.go | 120 + .../pkg/client/unversioned/services.go | 121 + .../client/unversioned/thirdpartyresources.go | 98 + .../kubernetes/pkg/client/unversioned/util.go | 79 + .../k8s.io/kubernetes/pkg/controller/OWNERS | 4 + .../pkg/controller/controller_utils.go | 698 ++++ .../k8s.io/kubernetes/pkg/controller/doc.go | 19 + .../pkg/controller/framework/controller.go | 326 ++ 
.../pkg/controller/framework/doc.go | 18 + .../framework/fake_controller_source.go | 262 ++ .../controller/framework/mutation_detector.go | 135 + .../controller/framework/shared_informer.go | 382 +++ .../kubernetes/pkg/controller/lookup_cache.go | 92 + .../kubernetes/pkg/credentialprovider/OWNERS | 3 + .../pkg/credentialprovider/config.go | 256 ++ .../kubernetes/pkg/credentialprovider/doc.go | 19 + .../pkg/credentialprovider/keyring.go | 338 ++ .../pkg/credentialprovider/plugins.go | 62 + .../pkg/credentialprovider/provider.go | 119 + vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go | 19 + .../kubernetes/pkg/fieldpath/fieldpath.go | 124 + vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS | 8 + vendor/k8s.io/kubernetes/pkg/kubectl/apply.go | 194 ++ .../kubernetes/pkg/kubectl/autoscale.go | 130 + .../kubernetes/pkg/kubectl/bash_comp_utils.go | 36 + .../pkg/kubectl/cmd/util/clientcache.go | 168 + .../pkg/kubectl/cmd/util/factory.go | 1230 +++++++ .../pkg/kubectl/cmd/util/helpers.go | 611 ++++ .../pkg/kubectl/cmd/util/printing.go | 136 + .../kubernetes/pkg/kubectl/configmap.go | 212 ++ .../pkg/kubectl/custom_column_printer.go | 224 ++ .../k8s.io/kubernetes/pkg/kubectl/describe.go | 2487 ++++++++++++++ vendor/k8s.io/kubernetes/pkg/kubectl/doc.go | 20 + .../k8s.io/kubernetes/pkg/kubectl/explain.go | 251 ++ .../k8s.io/kubernetes/pkg/kubectl/generate.go | 200 ++ .../k8s.io/kubernetes/pkg/kubectl/history.go | 125 + .../kubernetes/pkg/kubectl/interfaces.go | 32 + .../k8s.io/kubernetes/pkg/kubectl/kubectl.go | 248 ++ .../kubernetes/pkg/kubectl/namespace.go | 79 + .../kubernetes/pkg/kubectl/proxy_server.go | 248 ++ .../pkg/kubectl/resource/builder.go | 747 ++++ .../kubernetes/pkg/kubectl/resource/doc.go | 24 + .../kubernetes/pkg/kubectl/resource/helper.go | 166 + .../pkg/kubectl/resource/interfaces.go | 46 + .../kubernetes/pkg/kubectl/resource/mapper.go | 167 + .../kubernetes/pkg/kubectl/resource/result.go | 291 ++ .../pkg/kubectl/resource/selector.go | 90 + .../pkg/kubectl/resource/visitor.go | 643 ++++ .../pkg/kubectl/resource_printer.go | 2214 ++++++++++++ .../k8s.io/kubernetes/pkg/kubectl/rollback.go | 122 + .../kubernetes/pkg/kubectl/rolling_updater.go | 810 +++++ .../kubernetes/pkg/kubectl/rollout_status.go | 57 + vendor/k8s.io/kubernetes/pkg/kubectl/run.go | 869 +++++ vendor/k8s.io/kubernetes/pkg/kubectl/scale.go | 389 +++ .../k8s.io/kubernetes/pkg/kubectl/secret.go | 207 ++ .../pkg/kubectl/secret_for_docker_registry.go | 131 + .../kubernetes/pkg/kubectl/secret_for_tls.go | 124 + .../k8s.io/kubernetes/pkg/kubectl/service.go | 229 ++ .../kubernetes/pkg/kubectl/serviceaccount.go | 51 + .../pkg/kubectl/sorted_event_list.go | 36 + .../pkg/kubectl/sorted_resource_name_list.go | 72 + .../kubernetes/pkg/kubectl/sorting_printer.go | 219 ++ vendor/k8s.io/kubernetes/pkg/kubectl/stop.go | 448 +++ .../k8s.io/kubernetes/pkg/kubectl/version.go | 40 + .../kubernetes/pkg/kubectl/watchloop.go | 45 + .../k8s.io/kubernetes/pkg/kubelet/qos/doc.go | 25 + .../kubernetes/pkg/kubelet/qos/policy.go | 67 + .../kubernetes/pkg/kubelet/qos/util/qos.go | 146 + .../k8s.io/kubernetes/pkg/master/ports/doc.go | 19 + .../kubernetes/pkg/master/ports/ports.go | 40 + .../kubernetes/pkg/registry/generic/doc.go | 19 + .../pkg/registry/generic/matcher.go | 142 + .../pkg/registry/generic/options.go | 28 + .../pkg/registry/generic/storage_decorator.go | 44 + .../registry/thirdpartyresourcedata/codec.go | 601 ++++ .../registry/thirdpartyresourcedata/doc.go | 19 + .../thirdpartyresourcedata/registry.go | 80 + .../thirdpartyresourcedata/strategy.go 
| 92 + .../registry/thirdpartyresourcedata/util.go | 68 + .../security/podsecuritypolicy/util/util.go | 145 + vendor/k8s.io/kubernetes/pkg/storage/OWNERS | 6 + .../k8s.io/kubernetes/pkg/storage/cacher.go | 629 ++++ vendor/k8s.io/kubernetes/pkg/storage/doc.go | 18 + .../k8s.io/kubernetes/pkg/storage/errors.go | 174 + .../kubernetes/pkg/storage/interfaces.go | 171 + vendor/k8s.io/kubernetes/pkg/storage/util.go | 90 + .../kubernetes/pkg/storage/watch_cache.go | 331 ++ .../kubernetes/pkg/util/crypto/crypto.go | 190 ++ .../pkg/util/deployment/deployment.go | 478 +++ .../k8s.io/kubernetes/pkg/util/diff/diff.go | 267 ++ .../k8s.io/kubernetes/pkg/util/flag/flags.go | 51 + .../kubernetes/pkg/util/flag/tristate.go | 83 + .../pkg/util/flowcontrol/backoff.go | 149 + .../pkg/util/flowcontrol/throttle.go | 116 + .../kubernetes/pkg/util/homedir/homedir.go | 40 + .../kubernetes/pkg/util/integer/integer.go | 45 + .../kubernetes/pkg/util/jsonpath/doc.go | 20 + .../kubernetes/pkg/util/jsonpath/jsonpath.go | 486 +++ .../kubernetes/pkg/util/jsonpath/node.go | 239 ++ .../kubernetes/pkg/util/jsonpath/parser.go | 427 +++ .../k8s.io/kubernetes/pkg/util/labels/doc.go | 18 + .../kubernetes/pkg/util/labels/labels.go | 126 + vendor/k8s.io/kubernetes/pkg/util/pod/doc.go | 18 + vendor/k8s.io/kubernetes/pkg/util/pod/pod.go | 100 + .../pkg/util/replicaset/replicaset.go | 110 + .../k8s.io/kubernetes/pkg/util/slice/slice.go | 61 + .../pkg/util/strategicpatch/patch.go | 1243 +++++++ .../kubernetes/pkg/version/.gitattributes | 1 + vendor/k8s.io/kubernetes/pkg/version/base.go | 59 + vendor/k8s.io/kubernetes/pkg/version/doc.go | 19 + .../k8s.io/kubernetes/pkg/version/semver.go | 50 + .../k8s.io/kubernetes/pkg/version/version.go | 76 + .../plugin/pkg/client/auth/gcp/gcp.go | 106 + .../plugin/pkg/client/auth/oidc/OWNERS | 2 + .../plugin/pkg/client/auth/oidc/oidc.go | 270 ++ .../plugin/pkg/client/auth/plugins.go | 23 + .../third_party/forked/json/LICENSE | 27 + .../third_party/forked/json/fields.go | 501 +++ .../third_party/golang/template/exec.go | 94 + .../third_party/golang/template/funcs.go | 599 ++++ 610 files changed, 113138 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/coreos/go-oidc/LICENSE create mode 100644 vendor/github.com/coreos/go-oidc/NOTICE create mode 100644 vendor/github.com/coreos/go-oidc/http/client.go create mode 100644 vendor/github.com/coreos/go-oidc/http/http.go create mode 100644 vendor/github.com/coreos/go-oidc/http/middleware.go create mode 100644 vendor/github.com/coreos/go-oidc/http/url.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/claims.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jose.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwk.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jws.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwt.go create mode 100755 vendor/github.com/coreos/go-oidc/jose/sig.go create mode 100755 vendor/github.com/coreos/go-oidc/jose/sig_hmac.go create mode 100755 vendor/github.com/coreos/go-oidc/jose/sig_rsa.go create mode 100644 vendor/github.com/coreos/go-oidc/key/key.go create mode 100644 vendor/github.com/coreos/go-oidc/key/manager.go create mode 100644 vendor/github.com/coreos/go-oidc/key/repo.go create mode 100644 vendor/github.com/coreos/go-oidc/key/rotate.go create mode 100644 
vendor/github.com/coreos/go-oidc/key/sync.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/error.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/oauth2.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/client.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/identity.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/interface.go create mode 100755 vendor/github.com/coreos/go-oidc/oidc/key.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/provider.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/transport.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/util.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/verification.go create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/health/README.md create mode 100644 vendor/github.com/coreos/pkg/health/health.go create mode 100644 vendor/github.com/coreos/pkg/httputil/README.md create mode 100644 vendor/github.com/coreos/pkg/httputil/cookie.go create mode 100644 vendor/github.com/coreos/pkg/httputil/json.go create mode 100644 vendor/github.com/coreos/pkg/timeutil/backoff.go create mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE create mode 100644 vendor/github.com/evanphx/json-patch/README.md create mode 100644 vendor/github.com/evanphx/json-patch/merge.go create mode 100644 vendor/github.com/evanphx/json-patch/patch.go create mode 100644 vendor/github.com/golang/groupcache/LICENSE create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/proto/Makefile create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 
vendor/github.com/google/cadvisor/LICENSE create mode 100644 vendor/github.com/google/cadvisor/info/v1/container.go create mode 100644 vendor/github.com/google/cadvisor/info/v1/docker.go create mode 100644 vendor/github.com/google/cadvisor/info/v1/machine.go create mode 100644 vendor/github.com/google/cadvisor/info/v1/metric.go create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go create mode 100644 vendor/github.com/jonboulle/clockwork/.gitignore create mode 100644 vendor/github.com/jonboulle/clockwork/.travis.yml create mode 100644 vendor/github.com/jonboulle/clockwork/LICENSE create mode 100644 vendor/github.com/jonboulle/clockwork/README.md create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork.go create mode 100644 vendor/github.com/juju/ratelimit/LICENSE create mode 100644 vendor/github.com/juju/ratelimit/README.md create mode 100644 vendor/github.com/juju/ratelimit/ratelimit.go create mode 100644 vendor/github.com/juju/ratelimit/reader.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/.travis.yml create mode 100644 vendor/github.com/prometheus/procfs/AUTHORS.md create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/spf13/cobra/.gitignore create mode 100644 vendor/github.com/spf13/cobra/.mailmap create mode 100644 vendor/github.com/spf13/cobra/.travis.yml create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 
vendor/github.com/spf13/cobra/bash_completions.md create mode 100644 vendor/github.com/spf13/cobra/cobra.go create mode 100644 vendor/github.com/spf13/cobra/command.go create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/google.golang.org/cloud/LICENSE create mode 100644 vendor/google.golang.org/cloud/compute/metadata/metadata.go create mode 100644 vendor/google.golang.org/cloud/internal/cloud.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/register.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/types.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go create mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go create mode 100644 
vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go create mode 100644 vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/create.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/delete.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/export.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/rest.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/api/rest/update.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go 
create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/index.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/listers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/store.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/record/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/record/event.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/record/fake.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/request.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/transport/cache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/transport/config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/transport/transport.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go create mode 
100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/securitycontextconstraints.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go create mode 100644 vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/mutation_detector.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go create mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/apply.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/describe.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/explain.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/generate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/history.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/run.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/scale.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/secret.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/service.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/stop.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/version.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go create mode 100644 vendor/k8s.io/kubernetes/pkg/master/ports/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/master/ports/ports.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/generic/options.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go create mode 100644 vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go create mode 100644 
vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/cacher.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/errors.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/interfaces.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/util.go create mode 100644 vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/diff/diff.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/flag/flags.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/integer/integer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/labels/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/labels/labels.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/pod/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/pod/pod.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/slice/slice.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go create mode 100644 vendor/k8s.io/kubernetes/pkg/version/.gitattributes create mode 100644 vendor/k8s.io/kubernetes/pkg/version/base.go create mode 100644 vendor/k8s.io/kubernetes/pkg/version/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/version/semver.go create mode 100644 vendor/k8s.io/kubernetes/pkg/version/version.go create mode 100644 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go create mode 100644 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS create mode 100644 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go create mode 100644 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/json/fields.go create mode 100644 vendor/k8s.io/kubernetes/third_party/golang/template/exec.go create mode 100644 vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 776ad2b5..fc31bb87 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3,7 +3,7 @@ "GoVersion": "go1.6", "GodepVersion": "v74", "Packages": [ - "./cli/main/" + "./..." 
], "Deps": [ { @@ -24,6 +24,10 @@ "Comment": "v0.8.2", "Rev": "98a1428efc3d732f9e377b50c8e2113e070896cf" }, + { + "ImportPath": "github.com/beorn7/perks/quantile", + "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" + }, { "ImportPath": "github.com/blang/semver", "Comment": "v3.0.1", @@ -33,6 +37,51 @@ "ImportPath": "github.com/cloudfoundry-incubator/candiedyaml", "Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf" }, + { + "ImportPath": "github.com/coreos/go-oidc/http", + "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" + }, + { + "ImportPath": "github.com/coreos/go-oidc/jose", + "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" + }, + { + "ImportPath": "github.com/coreos/go-oidc/key", + "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" + }, + { + "ImportPath": "github.com/coreos/go-oidc/oauth2", + "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" + }, + { + "ImportPath": "github.com/coreos/go-oidc/oidc", + "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" + }, + { + "ImportPath": "github.com/coreos/go-systemd/journal", + "Comment": "v11-2-gfa8411d", + "Rev": "fa8411dcbcbad22b8542b0433914ef68b123f989" + }, + { + "ImportPath": "github.com/coreos/pkg/capnslog", + "Comment": "v2-12-g3ac0863", + "Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef" + }, + { + "ImportPath": "github.com/coreos/pkg/health", + "Comment": "v2-12-g3ac0863", + "Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef" + }, + { + "ImportPath": "github.com/coreos/pkg/httputil", + "Comment": "v2-12-g3ac0863", + "Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef" + }, + { + "ImportPath": "github.com/coreos/pkg/timeutil", + "Comment": "v2-12-g3ac0863", + "Rev": "3ac0863d7acf3bc44daf49afef8919af12f704ef" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" @@ -521,6 +570,10 @@ "Comment": "v1.2-54-g7c47e25", "Rev": "7c47e2558a0bbbaba9ecab06bc6681e73028a28a" }, + { + "ImportPath": "github.com/evanphx/json-patch", + "Rev": "465937c80b3c07a7c7ad20cc934898646a91c1de" + }, { "ImportPath": "github.com/fatih/structs", "Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e" @@ -614,6 +667,19 @@ "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" }, + { + "ImportPath": "github.com/golang/groupcache/lru", + "Rev": "604ed5785183e59ae2789449d89e73f3a2a77987" + }, + { + "ImportPath": "github.com/golang/protobuf/proto", + "Rev": "b982704f8bb716bb608144408cff30e15fbde841" + }, + { + "ImportPath": "github.com/google/cadvisor/info/v1", + "Comment": "v0.23.6", + "Rev": "4dbefc9b671b81257973a33211fb12370c1a526e" + }, { "ImportPath": "github.com/google/gofuzz", "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" @@ -626,6 +692,27 @@ "ImportPath": "github.com/gorilla/mux", "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5" }, + { + "ImportPath": "github.com/imdario/mergo", + "Comment": "0.1.3-8-g6633656", + "Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc" + }, + { + "ImportPath": "github.com/inconshreveable/mousetrap", + "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + }, + { + "ImportPath": "github.com/jonboulle/clockwork", + "Rev": "3f831b65b61282ba6bece21b91beea2edc4c887a" + }, + { + "ImportPath": "github.com/juju/ratelimit", + "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177" + }, + { + "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" + }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", "Comment": "v0.0.7", @@ -735,6 +822,36 @@ "ImportPath": "github.com/pborman/uuid", 
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" }, + { + "ImportPath": "github.com/prometheus/client_golang/prometheus", + "Comment": "0.7.0-39-g3b78d7a", + "Rev": "3b78d7a77f51ccbc364d4bc170920153022cfd08" + }, + { + "ImportPath": "github.com/prometheus/client_model/go", + "Comment": "model-0.0.2-12-gfa8ad6f", + "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" + }, + { + "ImportPath": "github.com/prometheus/common/expfmt", + "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8" + }, + { + "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", + "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8" + }, + { + "ImportPath": "github.com/prometheus/common/model", + "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8" + }, + { + "ImportPath": "github.com/prometheus/procfs", + "Rev": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5" + }, + { + "ImportPath": "github.com/spf13/cobra", + "Rev": "4c05eb1145f16d0e6bb4a3e1b6d769f4713cb41f" + }, { "ImportPath": "github.com/spf13/pflag", "Rev": "08b1a584251b5b62f458943640fc8ebd4d50aaa5" @@ -783,6 +900,10 @@ "ImportPath": "golang.org/x/net/context", "Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41" }, + { + "ImportPath": "golang.org/x/net/context/ctxhttp", + "Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41" + }, { "ImportPath": "golang.org/x/net/http2", "Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41" @@ -795,10 +916,38 @@ "ImportPath": "golang.org/x/net/proxy", "Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41" }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + }, + { + "ImportPath": "golang.org/x/oauth2/google", + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + }, + { + "ImportPath": "golang.org/x/oauth2/jws", + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + }, + { + "ImportPath": "golang.org/x/oauth2/jwt", + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + }, { "ImportPath": "golang.org/x/sys/unix", "Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d" }, + { + "ImportPath": "google.golang.org/cloud/compute/metadata", + "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" + }, + { + "ImportPath": "google.golang.org/cloud/internal", + "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" + }, { "ImportPath": "gopkg.in/inf.v0", "Comment": "v0.9.0", @@ -808,11 +957,46 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" }, + { + "ImportPath": "k8s.io/kubernetes/federation/apis/federation", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": 
"k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/api", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/api/annotations", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", "Comment": "v1.3.0-58-g57fb9ac", @@ -848,6 +1032,11 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/api/rest", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", "Comment": "v1.3.0-58-g57fb9ac", @@ -888,16 +1077,101 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1alpha1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + 
"ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", "Comment": "v1.3.0-58-g57fb9ac", @@ -913,6 +1187,41 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1alpha1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator", "Comment": "v1.3.0-58-g57fb9ac", @@ -928,6 +1237,111 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/cache", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/record", + 
"Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/restclient", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/transport", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/framework", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/conversion", "Comment": "v1.3.0-58-g57fb9ac", @@ -938,16 +1352,66 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/fields", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos/util", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/labels", "Comment": "v1.3.0-58-g57fb9ac", "Rev": 
"57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/master/ports", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/generic", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/runtime", "Comment": "v1.3.0-58-g57fb9ac", @@ -983,6 +1447,11 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontextconstraints/util", "Comment": "v1.3.0-58-g57fb9ac", @@ -993,6 +1462,11 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/storage", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/types", "Comment": "v1.3.0-58-g57fb9ac", @@ -1003,11 +1477,36 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/crypto", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/deployment", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/diff", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/errors", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/flag", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/flowcontrol", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/framer", "Comment": "v1.3.0-58-g57fb9ac", @@ -1018,6 +1517,16 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/homedir", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/integer", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/intstr", "Comment": "v1.3.0-58-g57fb9ac", @@ -1028,6 +1537,16 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/jsonpath", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/labels", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net", "Comment": "v1.3.0-58-g57fb9ac", @@ -1043,11 +1562,21 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/pod", + 
"Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rand", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/replicaset", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/runtime", "Comment": "v1.3.0-58-g57fb9ac", @@ -1058,6 +1587,16 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/slice", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/util/validation", "Comment": "v1.3.0-58-g57fb9ac", @@ -1078,6 +1617,11 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/pkg/version", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/pkg/watch", "Comment": "v1.3.0-58-g57fb9ac", @@ -1088,10 +1632,35 @@ "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" }, + { + "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/third_party/forked/json", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/reflect", "Comment": "v1.3.0-58-g57fb9ac", "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" + }, + { + "ImportPath": "k8s.io/kubernetes/third_party/golang/template", + "Comment": "v1.3.0-58-g57fb9ac", + "Rev": "57fb9acc109285378ecd0af925c8160eb8ca19e6" } ] } diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000..339177be --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 00000000..1602287d --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 
+13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 
+6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..587b1fc5 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. 
+// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(float64(l) * q) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/coreos/go-oidc/LICENSE b/vendor/github.com/coreos/go-oidc/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/go-oidc/NOTICE b/vendor/github.com/coreos/go-oidc/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
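For orientation, a minimal sketch of how the vendored quantile package above is typically driven; the target quantiles, error bounds, and sample values here are illustrative and not part of the patch:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile, each with its own absolute error.
	q := quantile.NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i)) // observations arrive one at a time from a stream
	}
	fmt.Printf("p50=%v p99=%v n=%d\n", q.Query(0.50), q.Query(0.99), q.Count())
}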
diff --git a/vendor/github.com/coreos/go-oidc/http/client.go b/vendor/github.com/coreos/go-oidc/http/client.go new file mode 100644 index 00000000..fd079b49 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/http/client.go @@ -0,0 +1,7 @@ +package http + +import "net/http" + +type Client interface { + Do(*http.Request) (*http.Response, error) +} diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go new file mode 100644 index 00000000..f0d051b5 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/http/http.go @@ -0,0 +1,159 @@ +package http + +import ( + "encoding/base64" + "encoding/json" + "errors" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") +) + +func WriteError(w http.ResponseWriter, code int, msg string) { + e := struct { + Error string `json:"error"` + }{ + Error: msg, + } + b, err := json.Marshal(e) + if err != nil { + log.Errorf("Failed marshaling %#v to JSON: %v", e, err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + w.Write(b) +} + +// BasicAuth parses a username and password from the request's +// Authorization header. This was pulled from golang master: +// https://codereview.appspot.com/76540043 +func BasicAuth(r *http.Request) (username, password string, ok bool) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + + if !strings.HasPrefix(auth, "Basic ") { + return + } + c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + +func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { + for _, field := range strings.Split(hdr, ",") { + parts := strings.SplitN(strings.TrimSpace(field), "=", 2) + k := strings.ToLower(strings.TrimSpace(parts[0])) + if k != "max-age" { + continue + } + + if len(parts) == 1 { + return 0, false, errors.New("max-age has no value") + } + + v := strings.TrimSpace(parts[1]) + if v == "" { + return 0, false, errors.New("max-age has empty value") + } + + age, err := strconv.Atoi(v) + if err != nil { + return 0, false, err + } + + if age <= 0 { + return 0, false, nil + } + + return time.Duration(age) * time.Second, true, nil + } + + return 0, false, nil +} + +func expires(date, expires string) (time.Duration, bool, error) { + if date == "" || expires == "" { + return 0, false, nil + } + + te, err := time.Parse(time.RFC1123, expires) + if err != nil { + return 0, false, err + } + + td, err := time.Parse(time.RFC1123, date) + if err != nil { + return 0, false, err + } + + ttl := te.Sub(td) + + // headers indicate data already expired, caller should not + // have to care about this case + if ttl <= 0 { + return 0, false, nil + } + + return ttl, true, nil +} + +func Cacheable(hdr http.Header) (time.Duration, bool, error) { + ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) + if err != nil || ok { + return ttl, ok, err + } + + return expires(hdr.Get("Date"), hdr.Get("Expires")) +} + +// MergeQuery appends additional query values to an existing URL. +func MergeQuery(u url.URL, q url.Values) url.URL { + uv := u.Query() + for k, vs := range q { + for _, v := range vs { + uv.Add(k, v) + } + } + u.RawQuery = uv.Encode() + return u +} + +// NewResourceLocation appends a resource id to the end of the requested URL path. 
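As a rough illustration of the caching and URL helpers above, a hypothetical caller might combine Cacheable and MergeQuery like this; the header values and query parameters are made up for the example:

package main

import (
	"fmt"
	"net/http"
	"net/url"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	// Derive a cache TTL from typical response headers.
	hdr := http.Header{"Cache-Control": []string{"public, max-age=300"}}
	if ttl, ok, err := phttp.Cacheable(hdr); err == nil && ok {
		fmt.Println("cache for", ttl) // 5m0s
	}

	// Append extra query parameters to an existing URL.
	u, _ := url.Parse("https://provider.example.com/authorize?client_id=abc")
	merged := phttp.MergeQuery(*u, url.Values{"state": {"xyz"}})
	fmt.Println(merged.String())
}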
+func NewResourceLocation(reqURL *url.URL, id string) string { + var u url.URL + u = *reqURL + u.Path = path.Join(u.Path, id) + u.RawQuery = "" + u.Fragment = "" + return u.String() +} + +// CopyRequest returns a clone of the provided *http.Request. +// The returned object is a shallow copy of the struct and a +// deep copy of its Header field. +func CopyRequest(r *http.Request) *http.Request { + r2 := *r + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return &r2 +} diff --git a/vendor/github.com/coreos/go-oidc/http/middleware.go b/vendor/github.com/coreos/go-oidc/http/middleware.go new file mode 100644 index 00000000..270b3bc0 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/http/middleware.go @@ -0,0 +1,14 @@ +package http + +import ( + "net/http" +) + +type LoggingMiddleware struct { + Next http.Handler +} + +func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { + log.Infof("HTTP %s %v", r.Method, r.URL) + l.Next.ServeHTTP(w, r) +} diff --git a/vendor/github.com/coreos/go-oidc/http/url.go b/vendor/github.com/coreos/go-oidc/http/url.go new file mode 100644 index 00000000..df60eb1a --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/http/url.go @@ -0,0 +1,29 @@ +package http + +import ( + "errors" + "net/url" +) + +// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty +// since `url.Parse("")` does not return an error. Must contian a scheme and a host. +func ParseNonEmptyURL(u string) (*url.URL, error) { + if u == "" { + return nil, errors.New("url is empty") + } + + ur, err := url.Parse(u) + if err != nil { + return nil, err + } + + if ur.Scheme == "" { + return nil, errors.New("url scheme is empty") + } + + if ur.Host == "" { + return nil, errors.New("url host is empty") + } + + return ur, nil +} diff --git a/vendor/github.com/coreos/go-oidc/jose/claims.go b/vendor/github.com/coreos/go-oidc/jose/claims.go new file mode 100644 index 00000000..8b48bfd2 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/claims.go @@ -0,0 +1,126 @@ +package jose + +import ( + "encoding/json" + "fmt" + "math" + "time" +) + +type Claims map[string]interface{} + +func (c Claims) Add(name string, value interface{}) { + c[name] = value +} + +func (c Claims) StringClaim(name string) (string, bool, error) { + cl, ok := c[name] + if !ok { + return "", false, nil + } + + v, ok := cl.(string) + if !ok { + return "", false, fmt.Errorf("unable to parse claim as string: %v", name) + } + + return v, true, nil +} + +func (c Claims) StringsClaim(name string) ([]string, bool, error) { + cl, ok := c[name] + if !ok { + return nil, false, nil + } + + if v, ok := cl.([]string); ok { + return v, true, nil + } + + // When unmarshaled, []string will become []interface{}. 
+ if v, ok := cl.([]interface{}); ok { + var ret []string + for _, vv := range v { + str, ok := vv.(string) + if !ok { + return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) + } + ret = append(ret, str) + } + return ret, true, nil + } + + return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) +} + +func (c Claims) Int64Claim(name string) (int64, bool, error) { + cl, ok := c[name] + if !ok { + return 0, false, nil + } + + v, ok := cl.(int64) + if !ok { + vf, ok := cl.(float64) + if !ok { + return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name) + } + v = int64(vf) + } + + return v, true, nil +} + +func (c Claims) Float64Claim(name string) (float64, bool, error) { + cl, ok := c[name] + if !ok { + return 0, false, nil + } + + v, ok := cl.(float64) + if !ok { + vi, ok := cl.(int64) + if !ok { + return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name) + } + v = float64(vi) + } + + return v, true, nil +} + +func (c Claims) TimeClaim(name string) (time.Time, bool, error) { + v, ok, err := c.Float64Claim(name) + if !ok || err != nil { + return time.Time{}, ok, err + } + + s := math.Trunc(v) + ns := (v - s) * math.Pow(10, 9) + return time.Unix(int64(s), int64(ns)).UTC(), true, nil +} + +func decodeClaims(payload []byte) (Claims, error) { + var c Claims + if err := json.Unmarshal(payload, &c); err != nil { + return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err) + } + return c, nil +} + +func marshalClaims(c Claims) ([]byte, error) { + b, err := json.Marshal(c) + if err != nil { + return nil, err + } + return b, nil +} + +func encodeClaims(c Claims) (string, error) { + b, err := marshalClaims(c) + if err != nil { + return "", err + } + + return encodeSegment(b), nil +} diff --git a/vendor/github.com/coreos/go-oidc/jose/jose.go b/vendor/github.com/coreos/go-oidc/jose/jose.go new file mode 100644 index 00000000..62099265 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/jose.go @@ -0,0 +1,112 @@ +package jose + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strings" +) + +const ( + HeaderMediaType = "typ" + HeaderKeyAlgorithm = "alg" + HeaderKeyID = "kid" +) + +const ( + // Encryption Algorithm Header Parameter Values for JWS + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6 + AlgHS256 = "HS256" + AlgHS384 = "HS384" + AlgHS512 = "HS512" + AlgRS256 = "RS256" + AlgRS384 = "RS384" + AlgRS512 = "RS512" + AlgES256 = "ES256" + AlgES384 = "ES384" + AlgES512 = "ES512" + AlgPS256 = "PS256" + AlgPS384 = "PS384" + AlgPS512 = "PS512" + AlgNone = "none" +) + +const ( + // Algorithm Header Parameter Values for JWE + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1 + AlgRSA15 = "RSA1_5" + AlgRSAOAEP = "RSA-OAEP" + AlgRSAOAEP256 = "RSA-OAEP-256" + AlgA128KW = "A128KW" + AlgA192KW = "A192KW" + AlgA256KW = "A256KW" + AlgDir = "dir" + AlgECDHES = "ECDH-ES" + AlgECDHESA128KW = "ECDH-ES+A128KW" + AlgECDHESA192KW = "ECDH-ES+A192KW" + AlgECDHESA256KW = "ECDH-ES+A256KW" + AlgA128GCMKW = "A128GCMKW" + AlgA192GCMKW = "A192GCMKW" + AlgA256GCMKW = "A256GCMKW" + AlgPBES2HS256A128KW = "PBES2-HS256+A128KW" + AlgPBES2HS384A192KW = "PBES2-HS384+A192KW" + AlgPBES2HS512A256KW = "PBES2-HS512+A256KW" +) + +const ( + // Encryption Algorithm Header Parameter Values for JWE + // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22 + EncA128CBCHS256 = "A128CBC-HS256" + EncA128CBCHS384 = "A128CBC-HS384" + EncA256CBCHS512 
= "A256CBC-HS512" + EncA128GCM = "A128GCM" + EncA192GCM = "A192GCM" + EncA256GCM = "A256GCM" +) + +type JOSEHeader map[string]string + +func (j JOSEHeader) Validate() error { + if _, exists := j[HeaderKeyAlgorithm]; !exists { + return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm) + } + + return nil +} + +func decodeHeader(seg string) (JOSEHeader, error) { + b, err := decodeSegment(seg) + if err != nil { + return nil, err + } + + var h JOSEHeader + err = json.Unmarshal(b, &h) + if err != nil { + return nil, err + } + + return h, nil +} + +func encodeHeader(h JOSEHeader) (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + + return encodeSegment(b), nil +} + +// Decode JWT specific base64url encoding with padding stripped +func decodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l != 0 { + seg += strings.Repeat("=", 4-l) + } + return base64.URLEncoding.DecodeString(seg) +} + +// Encode JWT specific base64url encoding with padding stripped +func encodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwk.go b/vendor/github.com/coreos/go-oidc/jose/jwk.go new file mode 100644 index 00000000..b7a8e235 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/jwk.go @@ -0,0 +1,135 @@ +package jose + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "math/big" + "strings" +) + +// JSON Web Key +// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5 +type JWK struct { + ID string + Type string + Alg string + Use string + Exponent int + Modulus *big.Int + Secret []byte +} + +type jwkJSON struct { + ID string `json:"kid"` + Type string `json:"kty"` + Alg string `json:"alg"` + Use string `json:"use"` + Exponent string `json:"e"` + Modulus string `json:"n"` +} + +func (j *JWK) MarshalJSON() ([]byte, error) { + t := jwkJSON{ + ID: j.ID, + Type: j.Type, + Alg: j.Alg, + Use: j.Use, + Exponent: encodeExponent(j.Exponent), + Modulus: encodeModulus(j.Modulus), + } + + return json.Marshal(&t) +} + +func (j *JWK) UnmarshalJSON(data []byte) error { + var t jwkJSON + err := json.Unmarshal(data, &t) + if err != nil { + return err + } + + e, err := decodeExponent(t.Exponent) + if err != nil { + return err + } + + n, err := decodeModulus(t.Modulus) + if err != nil { + return err + } + + j.ID = t.ID + j.Type = t.Type + j.Alg = t.Alg + j.Use = t.Use + j.Exponent = e + j.Modulus = n + + return nil +} + +type JWKSet struct { + Keys []JWK `json:"keys"` +} + +func decodeExponent(e string) (int, error) { + decE, err := decodeBase64URLPaddingOptional(e) + if err != nil { + return 0, err + } + var eBytes []byte + if len(decE) < 8 { + eBytes = make([]byte, 8-len(decE), 8) + eBytes = append(eBytes, decE...) + } else { + eBytes = decE + } + eReader := bytes.NewReader(eBytes) + var E uint64 + err = binary.Read(eReader, binary.BigEndian, &E) + if err != nil { + return 0, err + } + return int(E), nil +} + +func encodeExponent(e int) string { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(e)) + var idx int + for ; idx < 8; idx++ { + if b[idx] != 0x0 { + break + } + } + return base64.URLEncoding.EncodeToString(b[idx:]) +} + +// Turns a URL encoded modulus of a key into a big int. 
+func decodeModulus(n string) (*big.Int, error) { + decN, err := decodeBase64URLPaddingOptional(n) + if err != nil { + return nil, err + } + N := big.NewInt(0) + N.SetBytes(decN) + return N, nil +} + +func encodeModulus(n *big.Int) string { + return base64.URLEncoding.EncodeToString(n.Bytes()) +} + +// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not. +// The stdlib version currently doesn't handle this. +// We can get rid of this is if this bug: +// https://github.com/golang/go/issues/4237 +// ever closes. +func decodeBase64URLPaddingOptional(e string) ([]byte, error) { + if m := len(e) % 4; m != 0 { + e += strings.Repeat("=", 4-m) + } + return base64.URLEncoding.DecodeString(e) +} diff --git a/vendor/github.com/coreos/go-oidc/jose/jws.go b/vendor/github.com/coreos/go-oidc/jose/jws.go new file mode 100644 index 00000000..1049ece8 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/jws.go @@ -0,0 +1,51 @@ +package jose + +import ( + "fmt" + "strings" +) + +type JWS struct { + RawHeader string + Header JOSEHeader + RawPayload string + Payload []byte + Signature []byte +} + +// Given a raw encoded JWS token parses it and verifies the structure. +func ParseJWS(raw string) (JWS, error) { + parts := strings.Split(raw, ".") + if len(parts) != 3 { + return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts)) + } + + rawSig := parts[2] + jws := JWS{ + RawHeader: parts[0], + RawPayload: parts[1], + } + + header, err := decodeHeader(jws.RawHeader) + if err != nil { + return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err) + } + if err = header.Validate(); err != nil { + return JWS{}, fmt.Errorf("malformed JWS, %s", err) + } + jws.Header = header + + payload, err := decodeSegment(jws.RawPayload) + if err != nil { + return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err) + } + jws.Payload = payload + + sig, err := decodeSegment(rawSig) + if err != nil { + return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err) + } + jws.Signature = sig + + return jws, nil +} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwt.go b/vendor/github.com/coreos/go-oidc/jose/jwt.go new file mode 100644 index 00000000..3b3e9634 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/jwt.go @@ -0,0 +1,82 @@ +package jose + +import "strings" + +type JWT JWS + +func ParseJWT(token string) (jwt JWT, err error) { + jws, err := ParseJWS(token) + if err != nil { + return + } + + return JWT(jws), nil +} + +func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) { + jwt = JWT{} + + jwt.Header = header + jwt.Header[HeaderMediaType] = "JWT" + + claimBytes, err := marshalClaims(claims) + if err != nil { + return + } + jwt.Payload = claimBytes + + eh, err := encodeHeader(header) + if err != nil { + return + } + jwt.RawHeader = eh + + ec, err := encodeClaims(claims) + if err != nil { + return + } + jwt.RawPayload = ec + + return +} + +func (j *JWT) KeyID() (string, bool) { + kID, ok := j.Header[HeaderKeyID] + return kID, ok +} + +func (j *JWT) Claims() (Claims, error) { + return decodeClaims(j.Payload) +} + +// Encoded data part of the token which may be signed. 
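A brief sketch of the JWS/JWT parsing path above: build an unsigned token (alg "none", used here only to avoid signing in the example), encode it, parse it back, and read a claim. All claim values are placeholders:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	claims := jose.Claims{"sub": "user-123", "email": "user@example.com"}
	jwt, err := jose.NewJWT(jose.JOSEHeader{jose.HeaderKeyAlgorithm: jose.AlgNone}, claims)
	if err != nil {
		panic(err)
	}
	raw := jwt.Encode() // header.claims.signature (signature empty for this unsigned token)

	parsed, err := jose.ParseJWT(raw)
	if err != nil {
		panic(err)
	}
	got, _ := parsed.Claims()
	sub, ok, _ := got.StringClaim("sub")
	fmt.Println(sub, ok) // user-123 true
}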
+func (j *JWT) Data() string { + return strings.Join([]string{j.RawHeader, j.RawPayload}, ".") +} + +// Full encoded JWT token string in format: header.claims.signature +func (j *JWT) Encode() string { + d := j.Data() + s := encodeSegment(j.Signature) + return strings.Join([]string{d, s}, ".") +} + +func NewSignedJWT(claims Claims, s Signer) (*JWT, error) { + header := JOSEHeader{ + HeaderKeyAlgorithm: s.Alg(), + HeaderKeyID: s.ID(), + } + + jwt, err := NewJWT(header, claims) + if err != nil { + return nil, err + } + + sig, err := s.Sign([]byte(jwt.Data())) + if err != nil { + return nil, err + } + jwt.Signature = sig + + return &jwt, nil +} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig.go b/vendor/github.com/coreos/go-oidc/jose/sig.go new file mode 100755 index 00000000..7b2b253c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/sig.go @@ -0,0 +1,24 @@ +package jose + +import ( + "fmt" +) + +type Verifier interface { + ID() string + Alg() string + Verify(sig []byte, data []byte) error +} + +type Signer interface { + Verifier + Sign(data []byte) (sig []byte, err error) +} + +func NewVerifier(jwk JWK) (Verifier, error) { + if jwk.Type != "RSA" { + return nil, fmt.Errorf("unsupported key type %q", jwk.Type) + } + + return NewVerifierRSA(jwk) +} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go b/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go new file mode 100755 index 00000000..b3ca3ef3 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go @@ -0,0 +1,67 @@ +package jose + +import ( + "bytes" + "crypto" + "crypto/hmac" + _ "crypto/sha256" + "errors" + "fmt" +) + +type VerifierHMAC struct { + KeyID string + Hash crypto.Hash + Secret []byte +} + +type SignerHMAC struct { + VerifierHMAC +} + +func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) { + if jwk.Alg != "" && jwk.Alg != "HS256" { + return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) + } + + v := VerifierHMAC{ + KeyID: jwk.ID, + Secret: jwk.Secret, + Hash: crypto.SHA256, + } + + return &v, nil +} + +func (v *VerifierHMAC) ID() string { + return v.KeyID +} + +func (v *VerifierHMAC) Alg() string { + return "HS256" +} + +func (v *VerifierHMAC) Verify(sig []byte, data []byte) error { + h := hmac.New(v.Hash.New, v.Secret) + h.Write(data) + if !bytes.Equal(sig, h.Sum(nil)) { + return errors.New("invalid hmac signature") + } + return nil +} + +func NewSignerHMAC(kid string, secret []byte) *SignerHMAC { + return &SignerHMAC{ + VerifierHMAC: VerifierHMAC{ + KeyID: kid, + Secret: secret, + Hash: crypto.SHA256, + }, + } +} + +func (s *SignerHMAC) Sign(data []byte) ([]byte, error) { + h := hmac.New(s.Hash.New, s.Secret) + h.Write(data) + return h.Sum(nil), nil +} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go new file mode 100755 index 00000000..004e45dd --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go @@ -0,0 +1,67 @@ +package jose + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "fmt" +) + +type VerifierRSA struct { + KeyID string + Hash crypto.Hash + PublicKey rsa.PublicKey +} + +type SignerRSA struct { + PrivateKey rsa.PrivateKey + VerifierRSA +} + +func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) { + if jwk.Alg != "" && jwk.Alg != "RS256" { + return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) + } + + v := VerifierRSA{ + KeyID: jwk.ID, + PublicKey: rsa.PublicKey{ + N: jwk.Modulus, + E: jwk.Exponent, + }, + Hash: crypto.SHA256, + } + + return &v, nil +} + +func NewSignerRSA(kid 
string, key rsa.PrivateKey) *SignerRSA { + return &SignerRSA{ + PrivateKey: key, + VerifierRSA: VerifierRSA{ + KeyID: kid, + PublicKey: key.PublicKey, + Hash: crypto.SHA256, + }, + } +} + +func (v *VerifierRSA) ID() string { + return v.KeyID +} + +func (v *VerifierRSA) Alg() string { + return "RS256" +} + +func (v *VerifierRSA) Verify(sig []byte, data []byte) error { + h := v.Hash.New() + h.Write(data) + return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig) +} + +func (s *SignerRSA) Sign(data []byte) ([]byte, error) { + h := s.Hash.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil)) +} diff --git a/vendor/github.com/coreos/go-oidc/key/key.go b/vendor/github.com/coreos/go-oidc/key/key.go new file mode 100644 index 00000000..d0142a9e --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/key/key.go @@ -0,0 +1,153 @@ +package key + +import ( + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "encoding/json" + "math/big" + "time" + + "github.com/coreos/go-oidc/jose" +) + +func NewPublicKey(jwk jose.JWK) *PublicKey { + return &PublicKey{jwk: jwk} +} + +type PublicKey struct { + jwk jose.JWK +} + +func (k *PublicKey) MarshalJSON() ([]byte, error) { + return json.Marshal(&k.jwk) +} + +func (k *PublicKey) UnmarshalJSON(data []byte) error { + var jwk jose.JWK + if err := json.Unmarshal(data, &jwk); err != nil { + return err + } + k.jwk = jwk + return nil +} + +func (k *PublicKey) ID() string { + return k.jwk.ID +} + +func (k *PublicKey) Verifier() (jose.Verifier, error) { + return jose.NewVerifierRSA(k.jwk) +} + +type PrivateKey struct { + KeyID string + PrivateKey *rsa.PrivateKey +} + +func (k *PrivateKey) ID() string { + return k.KeyID +} + +func (k *PrivateKey) Signer() jose.Signer { + return jose.NewSignerRSA(k.ID(), *k.PrivateKey) +} + +func (k *PrivateKey) JWK() jose.JWK { + return jose.JWK{ + ID: k.KeyID, + Type: "RSA", + Alg: "RS256", + Use: "sig", + Exponent: k.PrivateKey.PublicKey.E, + Modulus: k.PrivateKey.PublicKey.N, + } +} + +type KeySet interface { + ExpiresAt() time.Time +} + +type PublicKeySet struct { + keys []PublicKey + index map[string]*PublicKey + expiresAt time.Time +} + +func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { + keys := make([]PublicKey, len(jwks)) + index := make(map[string]*PublicKey) + for i, jwk := range jwks { + keys[i] = *NewPublicKey(jwk) + index[keys[i].ID()] = &keys[i] + } + return &PublicKeySet{ + keys: keys, + index: index, + expiresAt: exp, + } +} + +func (s *PublicKeySet) ExpiresAt() time.Time { + return s.expiresAt +} + +func (s *PublicKeySet) Keys() []PublicKey { + return s.keys +} + +func (s *PublicKeySet) Key(id string) *PublicKey { + return s.index[id] +} + +type PrivateKeySet struct { + keys []*PrivateKey + ActiveKeyID string + expiresAt time.Time +} + +func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { + return &PrivateKeySet{ + keys: keys, + ActiveKeyID: keys[0].ID(), + expiresAt: exp.UTC(), + } +} + +func (s *PrivateKeySet) Keys() []*PrivateKey { + return s.keys +} + +func (s *PrivateKeySet) ExpiresAt() time.Time { + return s.expiresAt +} + +func (s *PrivateKeySet) Active() *PrivateKey { + for i, k := range s.keys { + if k.ID() == s.ActiveKeyID { + return s.keys[i] + } + } + + return nil +} + +type GeneratePrivateKeyFunc func() (*PrivateKey, error) + +func GeneratePrivateKey() (*PrivateKey, error) { + pk, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + + k := PrivateKey{ + KeyID: base64BigInt(pk.PublicKey.N), + 
PrivateKey: pk, + } + + return &k, nil +} + +func base64BigInt(b *big.Int) string { + return base64.URLEncoding.EncodeToString(b.Bytes()) +} diff --git a/vendor/github.com/coreos/go-oidc/key/manager.go b/vendor/github.com/coreos/go-oidc/key/manager.go new file mode 100644 index 00000000..476ab6a8 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/key/manager.go @@ -0,0 +1,99 @@ +package key + +import ( + "errors" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/go-oidc/jose" + "github.com/coreos/pkg/health" +) + +type PrivateKeyManager interface { + ExpiresAt() time.Time + Signer() (jose.Signer, error) + JWKs() ([]jose.JWK, error) + PublicKeys() ([]PublicKey, error) + + WritableKeySetRepo + health.Checkable +} + +func NewPrivateKeyManager() PrivateKeyManager { + return &privateKeyManager{ + clock: clockwork.NewRealClock(), + } +} + +type privateKeyManager struct { + keySet *PrivateKeySet + clock clockwork.Clock +} + +func (m *privateKeyManager) ExpiresAt() time.Time { + if m.keySet == nil { + return m.clock.Now().UTC() + } + + return m.keySet.ExpiresAt() +} + +func (m *privateKeyManager) Signer() (jose.Signer, error) { + if err := m.Healthy(); err != nil { + return nil, err + } + + return m.keySet.Active().Signer(), nil +} + +func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { + if err := m.Healthy(); err != nil { + return nil, err + } + + keys := m.keySet.Keys() + jwks := make([]jose.JWK, len(keys)) + for i, k := range keys { + jwks[i] = k.JWK() + } + return jwks, nil +} + +func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { + jwks, err := m.JWKs() + if err != nil { + return nil, err + } + keys := make([]PublicKey, len(jwks)) + for i, jwk := range jwks { + keys[i] = *NewPublicKey(jwk) + } + return keys, nil +} + +func (m *privateKeyManager) Healthy() error { + if m.keySet == nil { + return errors.New("private key manager uninitialized") + } + + if len(m.keySet.Keys()) == 0 { + return errors.New("private key manager zero keys") + } + + if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { + return errors.New("private key manager keys expired") + } + + return nil +} + +func (m *privateKeyManager) Set(keySet KeySet) error { + privKeySet, ok := keySet.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } + + m.keySet = privKeySet + return nil +} diff --git a/vendor/github.com/coreos/go-oidc/key/repo.go b/vendor/github.com/coreos/go-oidc/key/repo.go new file mode 100644 index 00000000..1acdeb36 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/key/repo.go @@ -0,0 +1,55 @@ +package key + +import ( + "errors" + "sync" +) + +var ErrorNoKeys = errors.New("no keys found") + +type WritableKeySetRepo interface { + Set(KeySet) error +} + +type ReadableKeySetRepo interface { + Get() (KeySet, error) +} + +type PrivateKeySetRepo interface { + WritableKeySetRepo + ReadableKeySetRepo +} + +func NewPrivateKeySetRepo() PrivateKeySetRepo { + return &memPrivateKeySetRepo{} +} + +type memPrivateKeySetRepo struct { + mu sync.RWMutex + pks PrivateKeySet +} + +func (r *memPrivateKeySetRepo) Set(ks KeySet) error { + pks, ok := ks.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } else if pks == nil { + return errors.New("nil KeySet") + } + + r.mu.Lock() + defer r.mu.Unlock() + + r.pks = *pks + return nil +} + +func (r *memPrivateKeySetRepo) Get() (KeySet, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if r.pks.keys == nil { + return nil, ErrorNoKeys + } + return KeySet(&r.pks), nil +} diff --git 
a/vendor/github.com/coreos/go-oidc/key/rotate.go b/vendor/github.com/coreos/go-oidc/key/rotate.go new file mode 100644 index 00000000..9c5508bc --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/key/rotate.go @@ -0,0 +1,165 @@ +package key + +import ( + "errors" + "time" + + "github.com/coreos/pkg/capnslog" + ptime "github.com/coreos/pkg/timeutil" + "github.com/jonboulle/clockwork" +) + +var ( + log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key") + + ErrorPrivateKeysExpired = errors.New("private keys have expired") +) + +func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator { + return &PrivateKeyRotator{ + repo: repo, + ttl: ttl, + + keep: 2, + generateKey: GeneratePrivateKey, + clock: clockwork.NewRealClock(), + } +} + +type PrivateKeyRotator struct { + repo PrivateKeySetRepo + generateKey GeneratePrivateKeyFunc + clock clockwork.Clock + keep int + ttl time.Duration +} + +func (r *PrivateKeyRotator) expiresAt() time.Time { + return r.clock.Now().UTC().Add(r.ttl) +} + +func (r *PrivateKeyRotator) Healthy() error { + pks, err := r.privateKeySet() + if err != nil { + return err + } + + if r.clock.Now().After(pks.ExpiresAt()) { + return ErrorPrivateKeysExpired + } + + return nil +} + +func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) { + ks, err := r.repo.Get() + if err != nil { + return nil, err + } + + pks, ok := ks.(*PrivateKeySet) + if !ok { + return nil, errors.New("unable to cast to PrivateKeySet") + } + return pks, nil +} + +func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) { + pks, err := r.privateKeySet() + if err == ErrorNoKeys { + log.Infof("No keys in private key set; must rotate immediately") + return 0, nil + } + if err != nil { + return 0, err + } + + now := r.clock.Now() + + // Ideally, we want to rotate after half the TTL has elapsed. + idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2) + + // If we are past the ideal rotation time, rotate immediatly. + return max(0, idealRotationTime.Sub(now)), nil +} + +func max(a, b time.Duration) time.Duration { + if a > b { + return a + } + return b +} + +func (r *PrivateKeyRotator) Run() chan struct{} { + attempt := func() { + k, err := r.generateKey() + if err != nil { + log.Errorf("Failed generating signing key: %v", err) + return + } + + exp := r.expiresAt() + if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { + log.Errorf("Failed key rotation: %v", err) + return + } + + log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp) + } + + stop := make(chan struct{}) + go func() { + for { + var nextRotation time.Duration + var sleep time.Duration + var err error + for { + if nextRotation, err = r.nextRotation(); err == nil { + break + } + sleep = ptime.ExpBackoff(sleep, time.Minute) + log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err) + time.Sleep(sleep) + } + + log.Infof("will rotate keys in %v", nextRotation) + select { + case <-r.clock.After(nextRotation): + attempt() + case <-stop: + return + } + } + }() + + return stop +} + +func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { + ks, err := repo.Get() + if err != nil && err != ErrorNoKeys { + return err + } + + var keys []*PrivateKey + if ks != nil { + pks, ok := ks.(*PrivateKeySet) + if !ok { + return errors.New("unable to cast to PrivateKeySet") + } + keys = pks.Keys() + } + + keys = append([]*PrivateKey{k}, keys...) 
+ if l := len(keys); l > keep { + keys = keys[0:keep] + } + + nks := PrivateKeySet{ + keys: keys, + ActiveKeyID: k.ID(), + expiresAt: exp, + } + + return repo.Set(KeySet(&nks)) +} diff --git a/vendor/github.com/coreos/go-oidc/key/sync.go b/vendor/github.com/coreos/go-oidc/key/sync.go new file mode 100644 index 00000000..e8d5d03d --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/key/sync.go @@ -0,0 +1,91 @@ +package key + +import ( + "errors" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/pkg/timeutil" +) + +func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { + return &KeySetSyncer{ + readable: r, + writable: w, + clock: clockwork.NewRealClock(), + } +} + +type KeySetSyncer struct { + readable ReadableKeySetRepo + writable WritableKeySetRepo + clock clockwork.Clock +} + +func (s *KeySetSyncer) Run() chan struct{} { + stop := make(chan struct{}) + go func() { + var failing bool + var next time.Duration + for { + exp, err := syncKeySet(s.readable, s.writable, s.clock) + if err != nil || exp == 0 { + if !failing { + failing = true + next = time.Second + } else { + next = timeutil.ExpBackoff(next, time.Minute) + } + if exp == 0 { + log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err) + + } else { + log.Errorf("Failed syncing key set, retrying in %v: %v", next, err) + } + } else { + failing = false + next = exp / 2 + log.Infof("Synced key set, checking again in %v", next) + } + + select { + case <-s.clock.After(next): + continue + case <-stop: + return + } + } + }() + + return stop +} + +func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { + return syncKeySet(r, w, clockwork.NewRealClock()) +} + +// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. +// If keyset has already expired, returns a zero duration. 
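Putting the key repo, rotator, manager, and syncer above together, a rough usage sketch might look like the following; the one-hour TTL and the sleep are purely illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	// In-memory private key set, rotated on an hourly TTL.
	repo := key.NewPrivateKeySetRepo()
	rotator := key.NewPrivateKeyRotator(repo, time.Hour)
	stopRotate := rotator.Run() // background goroutine; returns a stop channel
	defer close(stopRotate)

	// Keep a PrivateKeyManager in sync with the repo so it can hand out signers.
	manager := key.NewPrivateKeyManager()
	stopSync := key.NewKeySetSyncer(repo, manager).Run()
	defer close(stopSync)

	// Crude wait for the first key to be generated and synced; a real program
	// would not poll like this.
	time.Sleep(2 * time.Second)
	signer, err := manager.Signer()
	fmt.Println(signer != nil, err)
}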
+func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { + var ks KeySet + ks, err = r.Get() + if err != nil { + return + } + + if ks == nil { + err = errors.New("no source KeySet") + return + } + + if err = w.Set(ks); err != nil { + return + } + + now := clock.Now() + if ks.ExpiresAt().After(now) { + exp = ks.ExpiresAt().Sub(now) + } + return +} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/error.go b/vendor/github.com/coreos/go-oidc/oauth2/error.go new file mode 100644 index 00000000..50d89094 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oauth2/error.go @@ -0,0 +1,29 @@ +package oauth2 + +const ( + ErrorAccessDenied = "access_denied" + ErrorInvalidClient = "invalid_client" + ErrorInvalidGrant = "invalid_grant" + ErrorInvalidRequest = "invalid_request" + ErrorServerError = "server_error" + ErrorUnauthorizedClient = "unauthorized_client" + ErrorUnsupportedGrantType = "unsupported_grant_type" + ErrorUnsupportedResponseType = "unsupported_response_type" +) + +type Error struct { + Type string `json:"error"` + Description string `json:"error_description,omitempty"` + State string `json:"state,omitempty"` +} + +func (e *Error) Error() string { + if e.Description != "" { + return e.Type + ": " + e.Description + } + return e.Type +} + +func NewError(typ string) *Error { + return &Error{Type: typ} +} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go new file mode 100644 index 00000000..1c68293a --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go @@ -0,0 +1,416 @@ +package oauth2 + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + + phttp "github.com/coreos/go-oidc/http" +) + +// ResponseTypesEqual compares two response_type values. If either +// contains a space, it is treated as an unordered list. For example, +// comparing "code id_token" and "id_token code" would evaluate to true. +func ResponseTypesEqual(r1, r2 string) bool { + if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { + // fast route, no split needed + return r1 == r2 + } + + // split, sort, and compare + r1Fields := strings.Fields(r1) + r2Fields := strings.Fields(r2) + if len(r1Fields) != len(r2Fields) { + return false + } + sort.Strings(r1Fields) + sort.Strings(r2Fields) + for i, r1Field := range r1Fields { + if r1Field != r2Fields[i] { + return false + } + } + return true +} + +const ( + // OAuth2.0 response types registered by OIDC. + // + // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents + ResponseTypeCode = "code" + ResponseTypeCodeIDToken = "code id_token" + ResponseTypeCodeIDTokenToken = "code id_token token" + ResponseTypeIDToken = "id_token" + ResponseTypeIDTokenToken = "id_token token" + ResponseTypeToken = "token" + ResponseTypeNone = "none" +) + +const ( + GrantTypeAuthCode = "authorization_code" + GrantTypeClientCreds = "client_credentials" + GrantTypeUserCreds = "password" + GrantTypeImplicit = "implicit" + GrantTypeRefreshToken = "refresh_token" + + AuthMethodClientSecretPost = "client_secret_post" + AuthMethodClientSecretBasic = "client_secret_basic" + AuthMethodClientSecretJWT = "client_secret_jwt" + AuthMethodPrivateKeyJWT = "private_key_jwt" +) + +type Config struct { + Credentials ClientCredentials + Scope []string + RedirectURL string + AuthURL string + TokenURL string + + // Must be one of the AuthMethodXXX methods above. 
Right now, only + // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. + AuthMethod string +} + +type Client struct { + hc phttp.Client + creds ClientCredentials + scope []string + authURL *url.URL + redirectURL *url.URL + tokenURL *url.URL + authMethod string +} + +type ClientCredentials struct { + ID string + Secret string +} + +func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { + if len(cfg.Credentials.ID) == 0 { + err = errors.New("missing client id") + return + } + + if len(cfg.Credentials.Secret) == 0 { + err = errors.New("missing client secret") + return + } + + if cfg.AuthMethod == "" { + cfg.AuthMethod = AuthMethodClientSecretBasic + } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { + err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) + return + } + + au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) + if err != nil { + return + } + + tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) + if err != nil { + return + } + + // Allow empty redirect URL in the case where the client + // only needs to verify a given token. + ru, err := url.Parse(cfg.RedirectURL) + if err != nil { + return + } + + c = &Client{ + creds: cfg.Credentials, + scope: cfg.Scope, + redirectURL: ru, + authURL: au, + tokenURL: tu, + hc: hc, + authMethod: cfg.AuthMethod, + } + + return +} + +// Return the embedded HTTP client +func (c *Client) HttpClient() phttp.Client { + return c.hc +} + +// Generate the url for initial redirect to oauth provider. +func (c *Client) AuthCodeURL(state, accessType, prompt string) string { + v := c.commonURLValues() + v.Set("state", state) + if strings.ToLower(accessType) == "offline" { + v.Set("access_type", "offline") + } + + if prompt != "" { + v.Set("prompt", prompt) + } + v.Set("response_type", "code") + + q := v.Encode() + u := *c.authURL + if u.RawQuery == "" { + u.RawQuery = q + } else { + u.RawQuery += "&" + q + } + return u.String() +} + +func (c *Client) commonURLValues() url.Values { + return url.Values{ + "redirect_uri": {c.redirectURL.String()}, + "scope": {strings.Join(c.scope, " ")}, + "client_id": {c.creds.ID}, + } +} + +func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { + var req *http.Request + var err error + switch c.authMethod { + case AuthMethodClientSecretPost: + values.Set("client_secret", c.creds.Secret) + req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) + if err != nil { + return nil, err + } + case AuthMethodClientSecretBasic: + req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) + if err != nil { + return nil, err + } + encodedID := url.QueryEscape(c.creds.ID) + encodedSecret := url.QueryEscape(c.creds.Secret) + req.SetBasicAuth(encodedID, encodedSecret) + default: + panic("misconfigured client: auth method not supported") + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req, nil + +} + +// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. +// May not be supported by all OAuth2 servers. 
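A minimal sketch of constructing the OAuth2 client above and starting the authorization code flow; every endpoint, credential, and state value below is a placeholder:

package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	cfg := oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "my-client", Secret: "my-secret"},
		Scope:       []string{"openid", "email"},
		RedirectURL: "https://app.example.com/callback",
		AuthURL:     "https://provider.example.com/authorize",
		TokenURL:    "https://provider.example.com/token",
		// AuthMethod defaults to client_secret_basic when left empty.
	}

	client, err := oauth2.NewClient(http.DefaultClient, cfg)
	if err != nil {
		panic(err)
	}

	// Redirect the user here to start the flow; the returned code is later
	// exchanged via client.RequestToken(oauth2.GrantTypeAuthCode, code).
	fmt.Println(client.AuthCodeURL("some-state", "", ""))
}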
+func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { + v := url.Values{ + "scope": {strings.Join(scope, " ")}, + "grant_type": {GrantTypeClientCreds}, + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type +// May not be supported by all OAuth2 servers. +func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { + v := url.Values{ + "scope": {strings.Join(c.scope, " ")}, + "grant_type": {GrantTypeUserCreds}, + "username": {username}, + "password": {password}, + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +// RequestToken requests a token from the Token Endpoint with the specified grantType. +// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. +// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. +func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { + v := c.commonURLValues() + + v.Set("grant_type", grantType) + v.Set("client_secret", c.creds.Secret) + switch grantType { + case GrantTypeAuthCode: + v.Set("code", value) + case GrantTypeRefreshToken: + v.Set("refresh_token", value) + default: + err = fmt.Errorf("unsupported grant_type: %v", grantType) + return + } + + req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) + if err != nil { + return + } + + resp, err := c.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + return parseTokenResponse(resp) +} + +func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 + + contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return + } + + result = TokenResponse{ + RawBody: body, + } + + newError := func(typ, desc, state string) error { + if typ == "" { + return fmt.Errorf("unrecognized error %s", body) + } + return &Error{typ, desc, state} + } + + if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { + var vals url.Values + vals, err = url.ParseQuery(string(body)) + if err != nil { + return + } + if error := vals.Get("error"); error != "" || badStatusCode { + err = newError(error, vals.Get("error_description"), vals.Get("state")) + return + } + e := vals.Get("expires_in") + if e == "" { + e = vals.Get("expires") + } + if e != "" { + result.Expires, err = strconv.Atoi(e) + if err != nil { + return + } + } + result.AccessToken = vals.Get("access_token") + result.TokenType = vals.Get("token_type") + result.IDToken = vals.Get("id_token") + result.RefreshToken = vals.Get("refresh_token") + result.Scope = vals.Get("scope") + } else { + var r struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + RefreshToken string `json:"refresh_token"` + Scope string `json:"scope"` + State string `json:"state"` + ExpiresIn int `json:"expires_in"` + Expires int `json:"expires"` + Error 
string `json:"error"` + Desc string `json:"error_description"` + } + if err = json.Unmarshal(body, &r); err != nil { + return + } + if r.Error != "" || badStatusCode { + err = newError(r.Error, r.Desc, r.State) + return + } + result.AccessToken = r.AccessToken + result.TokenType = r.TokenType + result.IDToken = r.IDToken + result.RefreshToken = r.RefreshToken + result.Scope = r.Scope + if r.ExpiresIn == 0 { + result.Expires = r.Expires + } else { + result.Expires = r.ExpiresIn + } + } + return +} + +type TokenResponse struct { + AccessToken string + TokenType string + Expires int + IDToken string + RefreshToken string // OPTIONAL. + Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. + RawBody []byte // In case callers need some other non-standard info from the token response +} + +type AuthCodeRequest struct { + ResponseType string + ClientID string + RedirectURL *url.URL + Scope []string + State string +} + +func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { + acr := AuthCodeRequest{ + ResponseType: q.Get("response_type"), + ClientID: q.Get("client_id"), + State: q.Get("state"), + Scope: make([]string, 0), + } + + qs := strings.TrimSpace(q.Get("scope")) + if qs != "" { + acr.Scope = strings.Split(qs, " ") + } + + err := func() error { + if acr.ClientID == "" { + return NewError(ErrorInvalidRequest) + } + + redirectURL := q.Get("redirect_uri") + if redirectURL != "" { + ru, err := url.Parse(redirectURL) + if err != nil { + return NewError(ErrorInvalidRequest) + } + acr.RedirectURL = ru + } + + return nil + }() + + return acr, err +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/client.go b/vendor/github.com/coreos/go-oidc/oidc/client.go new file mode 100644 index 00000000..7a3cb40f --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/client.go @@ -0,0 +1,846 @@ +package oidc + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/mail" + "net/url" + "sync" + "time" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" + "github.com/coreos/go-oidc/oauth2" +) + +const ( + // amount of time that must pass after the last key sync + // completes before another attempt may begin + keySyncWindow = 5 * time.Second +) + +var ( + DefaultScope = []string{"openid", "email", "profile"} + + supportedAuthMethods = map[string]struct{}{ + oauth2.AuthMethodClientSecretBasic: struct{}{}, + oauth2.AuthMethodClientSecretPost: struct{}{}, + } +) + +type ClientCredentials oauth2.ClientCredentials + +type ClientIdentity struct { + Credentials ClientCredentials + Metadata ClientMetadata +} + +type JWAOptions struct { + // SigningAlg specifies an JWA alg for signing JWTs. + // + // Specifying this field implies different actions depending on the context. It may + // require objects be serialized and signed as a JWT instead of plain JSON, or + // require an existing JWT object use the specified alg. + // + // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata + SigningAlg string + // EncryptionAlg, if provided, specifies that the returned or sent object be stored + // (or nested) within a JWT object and encrypted with the provided JWA alg. + EncryptionAlg string + // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If + // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults + // to A128CBC-HS256. + // + // If EncryptionEnc is provided EncryptionAlg must also be specified. 
+ EncryptionEnc string +} + +func (opt JWAOptions) valid() error { + if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" { + return errors.New("encryption encoding provided with no encryption algorithm") + } + return nil +} + +func (opt JWAOptions) defaults() JWAOptions { + if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" { + opt.EncryptionEnc = jose.EncA128CBCHS256 + } + return opt +} + +var ( + // Ensure ClientMetadata satisfies these interfaces. + _ json.Marshaler = &ClientMetadata{} + _ json.Unmarshaler = &ClientMetadata{} +) + +// ClientMetadata holds metadata that the authorization server associates +// with a client identifier. The fields range from human-facing display +// strings such as client name, to items that impact the security of the +// protocol, such as the list of valid redirect URIs. +// +// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata +// +// TODO: support language specific claim representations +// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts +type ClientMetadata struct { + RedirectURIs []url.URL // Required + + // A list of OAuth 2.0 "response_type" values that the client wishes to restrict + // itself to. Either "code", "token", or another registered extension. + // + // If omitted, only "code" will be used. + ResponseTypes []string + // A list of OAuth 2.0 grant types the client wishes to restrict itself to. + // The grant type values used by OIDC are "authorization_code", "implicit", + // and "refresh_token". + // + // If ommitted, only "authorization_code" will be used. + GrantTypes []string + // "native" or "web". If omitted, "web". + ApplicationType string + + // List of email addresses. + Contacts []mail.Address + // Name of client to be presented to the end-user. + ClientName string + // URL that references a logo for the Client application. + LogoURI *url.URL + // URL of the home page of the Client. + ClientURI *url.URL + // Profile data policies and terms of use to be provided to the end user. + PolicyURI *url.URL + TermsOfServiceURI *url.URL + + // URL to or the value of the client's JSON Web Key Set document. + JWKSURI *url.URL + JWKS *jose.JWKSet + + // URL referencing a flie with a single JSON array of redirect URIs. + SectorIdentifierURI *url.URL + + SubjectType string + + // Options to restrict the JWS alg and enc values used for server responses and requests. + IDTokenResponseOptions JWAOptions + UserInfoResponseOptions JWAOptions + RequestObjectOptions JWAOptions + + // Client requested authorization method and signing options for the token endpoint. + // + // Defaults to "client_secret_basic" + TokenEndpointAuthMethod string + TokenEndpointAuthSigningAlg string + + // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized + // user must reauthroize. + // + // If 0, no limitation is placed on the maximum. + DefaultMaxAge int64 + // RequireAuthTime specifies if the auth_time claim in the ID token is required. + RequireAuthTime bool + + // Default Authentication Context Class Reference values for authentication requests. + DefaultACRValues []string + + // URI that a third party can use to initiate a login by the relaying party. + // + // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin + InitiateLoginURI *url.URL + // Pre-registered request_uri values that may be cached by the server. 
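// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// the Defaults method defined just below fills omitted fields of this struct.
// A relying party that only sets RedirectURIs might rely on it like this; the
// URL literal is a placeholder.
//
//	u, _ := url.Parse("https://app.example.com/callback")
//	meta := oidc.ClientMetadata{RedirectURIs: []url.URL{*u}}
//	meta = meta.Defaults()
//	// meta.ResponseTypes == []string{oauth2.ResponseTypeCode}
//	// meta.GrantTypes == []string{oauth2.GrantTypeAuthCode}
//	// meta.ApplicationType == "web"
//	// meta.TokenEndpointAuthMethod == oauth2.AuthMethodClientSecretBasic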
+ RequestURIs []url.URL +} + +// Defaults returns a shallow copy of ClientMetadata with default +// values replacing omitted fields. +func (m ClientMetadata) Defaults() ClientMetadata { + if len(m.ResponseTypes) == 0 { + m.ResponseTypes = []string{oauth2.ResponseTypeCode} + } + if len(m.GrantTypes) == 0 { + m.GrantTypes = []string{oauth2.GrantTypeAuthCode} + } + if m.ApplicationType == "" { + m.ApplicationType = "web" + } + if m.TokenEndpointAuthMethod == "" { + m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic + } + m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults() + m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults() + m.RequestObjectOptions = m.RequestObjectOptions.defaults() + return m +} + +func (m *ClientMetadata) MarshalJSON() ([]byte, error) { + e := m.toEncodableStruct() + return json.Marshal(&e) +} + +func (m *ClientMetadata) UnmarshalJSON(data []byte) error { + var e encodableClientMetadata + if err := json.Unmarshal(data, &e); err != nil { + return err + } + meta, err := e.toStruct() + if err != nil { + return err + } + if err := meta.Valid(); err != nil { + return err + } + *m = meta + return nil +} + +type encodableClientMetadata struct { + RedirectURIs []string `json:"redirect_uris"` // Required + ResponseTypes []string `json:"response_types,omitempty"` + GrantTypes []string `json:"grant_types,omitempty"` + ApplicationType string `json:"application_type,omitempty"` + Contacts []string `json:"contacts,omitempty"` + ClientName string `json:"client_name,omitempty"` + LogoURI string `json:"logo_uri,omitempty"` + ClientURI string `json:"client_uri,omitempty"` + PolicyURI string `json:"policy_uri,omitempty"` + TermsOfServiceURI string `json:"tos_uri,omitempty"` + JWKSURI string `json:"jwks_uri,omitempty"` + JWKS *jose.JWKSet `json:"jwks,omitempty"` + SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"` + SubjectType string `json:"subject_type,omitempty"` + IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"` + IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"` + IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"` + UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"` + UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"` + UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"` + RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"` + RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"` + RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"` + TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"` + TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"` + DefaultMaxAge int64 `json:"default_max_age,omitempty"` + RequireAuthTime bool `json:"require_auth_time,omitempty"` + DefaultACRValues []string `json:"default_acr_values,omitempty"` + InitiateLoginURI string `json:"initiate_login_uri,omitempty"` + RequestURIs []string `json:"request_uris,omitempty"` +} + +func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) { + p := stickyErrParser{} + m := ClientMetadata{ + RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"), + ResponseTypes: c.ResponseTypes, + GrantTypes: c.GrantTypes, + ApplicationType: c.ApplicationType, + Contacts: p.parseEmails(c.Contacts, "contacts"), + ClientName: c.ClientName, + 
LogoURI: p.parseURI(c.LogoURI, "logo_uri"), + ClientURI: p.parseURI(c.ClientURI, "client_uri"), + PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"), + TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"), + JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"), + JWKS: c.JWKS, + SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"), + SubjectType: c.SubjectType, + TokenEndpointAuthMethod: c.TokenEndpointAuthMethod, + TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg, + DefaultMaxAge: c.DefaultMaxAge, + RequireAuthTime: c.RequireAuthTime, + DefaultACRValues: c.DefaultACRValues, + InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"), + RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"), + IDTokenResponseOptions: JWAOptions{ + c.IDTokenSignedResponseAlg, + c.IDTokenEncryptedResponseAlg, + c.IDTokenEncryptedResponseEnc, + }, + UserInfoResponseOptions: JWAOptions{ + c.UserInfoSignedResponseAlg, + c.UserInfoEncryptedResponseAlg, + c.UserInfoEncryptedResponseEnc, + }, + RequestObjectOptions: JWAOptions{ + c.RequestObjectSigningAlg, + c.RequestObjectEncryptionAlg, + c.RequestObjectEncryptionEnc, + }, + } + if p.firstErr != nil { + return ClientMetadata{}, p.firstErr + } + return m, nil +} + +// stickyErrParser parses URIs and email addresses. Once it encounters +// a parse error, subsequent calls become no-op. +type stickyErrParser struct { + firstErr error +} + +func (p *stickyErrParser) parseURI(s, field string) *url.URL { + if p.firstErr != nil || s == "" { + return nil + } + u, err := url.Parse(s) + if err == nil { + if u.Host == "" { + err = errors.New("no host in URI") + } else if u.Scheme != "http" && u.Scheme != "https" { + err = errors.New("invalid URI scheme") + } + } + if err != nil { + p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err) + return nil + } + return u +} + +func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL { + if p.firstErr != nil || len(s) == 0 { + return nil + } + uris := make([]url.URL, len(s)) + for i, val := range s { + if val == "" { + p.firstErr = fmt.Errorf("invalid URI in field %s", field) + return nil + } + if u := p.parseURI(val, field); u != nil { + uris[i] = *u + } + } + return uris +} + +func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address { + if p.firstErr != nil || len(s) == 0 { + return nil + } + addrs := make([]mail.Address, len(s)) + for i, addr := range s { + if addr == "" { + p.firstErr = fmt.Errorf("invalid email in field %s", field) + return nil + } + a, err := mail.ParseAddress(addr) + if err != nil { + p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err) + return nil + } + addrs[i] = *a + } + return addrs +} + +func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata { + return encodableClientMetadata{ + RedirectURIs: urisToStrings(m.RedirectURIs), + ResponseTypes: m.ResponseTypes, + GrantTypes: m.GrantTypes, + ApplicationType: m.ApplicationType, + Contacts: emailsToStrings(m.Contacts), + ClientName: m.ClientName, + LogoURI: uriToString(m.LogoURI), + ClientURI: uriToString(m.ClientURI), + PolicyURI: uriToString(m.PolicyURI), + TermsOfServiceURI: uriToString(m.TermsOfServiceURI), + JWKSURI: uriToString(m.JWKSURI), + JWKS: m.JWKS, + SectorIdentifierURI: uriToString(m.SectorIdentifierURI), + SubjectType: m.SubjectType, + IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg, + IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg, + IDTokenEncryptedResponseEnc: 
m.IDTokenResponseOptions.EncryptionEnc, + UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg, + UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg, + UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc, + RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg, + RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg, + RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc, + TokenEndpointAuthMethod: m.TokenEndpointAuthMethod, + TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg, + DefaultMaxAge: m.DefaultMaxAge, + RequireAuthTime: m.RequireAuthTime, + DefaultACRValues: m.DefaultACRValues, + InitiateLoginURI: uriToString(m.InitiateLoginURI), + RequestURIs: urisToStrings(m.RequestURIs), + } +} + +func uriToString(u *url.URL) string { + if u == nil { + return "" + } + return u.String() +} + +func urisToStrings(urls []url.URL) []string { + if len(urls) == 0 { + return nil + } + sli := make([]string, len(urls)) + for i, u := range urls { + sli[i] = u.String() + } + return sli +} + +func emailsToStrings(addrs []mail.Address) []string { + if len(addrs) == 0 { + return nil + } + sli := make([]string, len(addrs)) + for i, addr := range addrs { + sli[i] = addr.String() + } + return sli +} + +// Valid determines if a ClientMetadata conforms with the OIDC specification. +// +// Valid is called by UnmarshalJSON. +// +// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for +// URLs fields where the OIDC spec requires it. This may change in future releases +// of this package. See: https://github.com/coreos/go-oidc/issues/34 +func (m *ClientMetadata) Valid() error { + if len(m.RedirectURIs) == 0 { + return errors.New("zero redirect URLs") + } + + validURI := func(u *url.URL, fieldName string) error { + if u.Host == "" { + return fmt.Errorf("no host for uri field %s", fieldName) + } + if u.Scheme != "http" && u.Scheme != "https" { + return fmt.Errorf("uri field %s scheme is not http or https", fieldName) + } + return nil + } + + uris := []struct { + val *url.URL + name string + }{ + {m.LogoURI, "logo_uri"}, + {m.ClientURI, "client_uri"}, + {m.PolicyURI, "policy_uri"}, + {m.TermsOfServiceURI, "tos_uri"}, + {m.JWKSURI, "jwks_uri"}, + {m.SectorIdentifierURI, "sector_identifier_uri"}, + {m.InitiateLoginURI, "initiate_login_uri"}, + } + + for _, uri := range uris { + if uri.val == nil { + continue + } + if err := validURI(uri.val, uri.name); err != nil { + return err + } + } + + uriLists := []struct { + vals []url.URL + name string + }{ + {m.RedirectURIs, "redirect_uris"}, + {m.RequestURIs, "request_uris"}, + } + for _, list := range uriLists { + for _, uri := range list.vals { + if err := validURI(&uri, list.name); err != nil { + return err + } + } + } + + options := []struct { + option JWAOptions + name string + }{ + {m.IDTokenResponseOptions, "id_token response"}, + {m.UserInfoResponseOptions, "userinfo response"}, + {m.RequestObjectOptions, "request_object"}, + } + for _, option := range options { + if err := option.option.valid(); err != nil { + return fmt.Errorf("invalid JWA values for %s: %v", option.name, err) + } + } + return nil +} + +type ClientRegistrationResponse struct { + ClientID string // Required + ClientSecret string + RegistrationAccessToken string + RegistrationClientURI string + // If IsZero is true, unspecified. + ClientIDIssuedAt time.Time + // Time at which the client_secret will expire. + // If IsZero is true, it will not expire. 
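// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// the Valid method above rejects metadata whose URLs lack a host or use a
// scheme other than http or https. For example, with a placeholder custom
// scheme:
//
//	u, _ := url.Parse("myapp://callback") // non-http(s) scheme
//	meta := oidc.ClientMetadata{RedirectURIs: []url.URL{*u}}
//	err := meta.Valid()
//	// err reports that the redirect_uris scheme is not http or https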
+ ClientSecretExpiresAt time.Time + + ClientMetadata +} + +type encodableClientRegistrationResponse struct { + ClientID string `json:"client_id"` // Required + ClientSecret string `json:"client_secret,omitempty"` + RegistrationAccessToken string `json:"registration_access_token,omitempty"` + RegistrationClientURI string `json:"registration_client_uri,omitempty"` + ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"` + // Time at which the client_secret will expire, in seconds since the epoch. + // If 0 it will not expire. + ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required + + encodableClientMetadata +} + +func unixToSec(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.Unix() +} + +func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) { + e := encodableClientRegistrationResponse{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RegistrationAccessToken: c.RegistrationAccessToken, + RegistrationClientURI: c.RegistrationClientURI, + ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt), + ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt), + encodableClientMetadata: c.ClientMetadata.toEncodableStruct(), + } + return json.Marshal(&e) +} + +func secToUnix(sec int64) time.Time { + if sec == 0 { + return time.Time{} + } + return time.Unix(sec, 0) +} + +func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { + var e encodableClientRegistrationResponse + if err := json.Unmarshal(data, &e); err != nil { + return err + } + if e.ClientID == "" { + return errors.New("no client_id in client registration response") + } + metadata, err := e.encodableClientMetadata.toStruct() + if err != nil { + return err + } + *c = ClientRegistrationResponse{ + ClientID: e.ClientID, + ClientSecret: e.ClientSecret, + RegistrationAccessToken: e.RegistrationAccessToken, + RegistrationClientURI: e.RegistrationClientURI, + ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt), + ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt), + ClientMetadata: metadata, + } + return nil +} + +type ClientConfig struct { + HTTPClient phttp.Client + Credentials ClientCredentials + Scope []string + RedirectURL string + ProviderConfig ProviderConfig + KeySet key.PublicKeySet +} + +func NewClient(cfg ClientConfig) (*Client, error) { + // Allow empty redirect URL in the case where the client + // only needs to verify a given token. 
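// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// a caller typically discovers the provider configuration first and then
// passes it to NewClient below. The issuer URL, credentials, redirect URL and
// the "code" variable are placeholders.
//
//	cfg, err := oidc.FetchProviderConfig(http.DefaultClient, "https://issuer.example.com")
//	if err != nil {
//		// handle discovery failure
//	}
//	client, err := oidc.NewClient(oidc.ClientConfig{
//		Credentials:    oidc.ClientCredentials{ID: "client-id", Secret: "client-secret"},
//		RedirectURL:    "https://app.example.com/callback",
//		ProviderConfig: cfg,
//	})
//	// Omitting Scope selects DefaultScope ("openid", "email", "profile");
//	// omitting HTTPClient selects http.DefaultClient. Long-running servers
//	// can also keep the config fresh via client.SyncProviderConfig.
//
//	// After the user is redirected back with ?code=..., exchange it for a
//	// verified ID token:
//	jwt, err := client.ExchangeAuthCode(code)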
+ ru, err := url.Parse(cfg.RedirectURL) + if err != nil { + return nil, fmt.Errorf("invalid redirect URL: %v", err) + } + + c := Client{ + credentials: cfg.Credentials, + httpClient: cfg.HTTPClient, + scope: cfg.Scope, + redirectURL: ru.String(), + providerConfig: newProviderConfigRepo(cfg.ProviderConfig), + keySet: cfg.KeySet, + } + + if c.httpClient == nil { + c.httpClient = http.DefaultClient + } + + if c.scope == nil { + c.scope = make([]string, len(DefaultScope)) + copy(c.scope, DefaultScope) + } + + return &c, nil +} + +type Client struct { + httpClient phttp.Client + providerConfig *providerConfigRepo + credentials ClientCredentials + redirectURL string + scope []string + keySet key.PublicKeySet + providerSyncer *ProviderConfigSyncer + + keySetSyncMutex sync.RWMutex + lastKeySetSync time.Time +} + +func (c *Client) Healthy() error { + now := time.Now().UTC() + + cfg := c.providerConfig.Get() + + if cfg.Empty() { + return errors.New("oidc client provider config empty") + } + + if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) { + return errors.New("oidc client provider config expired") + } + + return nil +} + +func (c *Client) OAuthClient() (*oauth2.Client, error) { + cfg := c.providerConfig.Get() + authMethod, err := chooseAuthMethod(cfg) + if err != nil { + return nil, err + } + + ocfg := oauth2.Config{ + Credentials: oauth2.ClientCredentials(c.credentials), + RedirectURL: c.redirectURL, + AuthURL: cfg.AuthEndpoint.String(), + TokenURL: cfg.TokenEndpoint.String(), + Scope: c.scope, + AuthMethod: authMethod, + } + + return oauth2.NewClient(c.httpClient, ocfg) +} + +func chooseAuthMethod(cfg ProviderConfig) (string, error) { + if len(cfg.TokenEndpointAuthMethodsSupported) == 0 { + return oauth2.AuthMethodClientSecretBasic, nil + } + + for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported { + if _, ok := supportedAuthMethods[authMethod]; ok { + return authMethod, nil + } + } + + return "", errors.New("no supported auth methods") +} + +// SyncProviderConfig starts the provider config syncer +func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} { + r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL) + s := NewProviderConfigSyncer(r, c.providerConfig) + stop := s.Run() + s.WaitUntilInitialSync() + return stop +} + +func (c *Client) maybeSyncKeys() error { + tooSoon := func() bool { + return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow)) + } + + // ignore request to sync keys if a sync operation has been + // attempted too recently + if tooSoon() { + return nil + } + + c.keySetSyncMutex.Lock() + defer c.keySetSyncMutex.Unlock() + + // check again, as another goroutine may have been holding + // the lock while updating the keys + if tooSoon() { + return nil + } + + cfg := c.providerConfig.Get() + r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String()) + w := &clientKeyRepo{client: c} + _, err := key.Sync(r, w) + c.lastKeySetSync = time.Now().UTC() + + return err +} + +type clientKeyRepo struct { + client *Client +} + +func (r *clientKeyRepo) Set(ks key.KeySet) error { + pks, ok := ks.(*key.PublicKeySet) + if !ok { + return errors.New("unable to cast to PublicKey") + } + r.client.keySet = *pks + return nil +} + +func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) { + cfg := c.providerConfig.Get() + + if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) { + return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds) + } + + oac, err := c.OAuthClient() + if err != nil { + 
return jose.JWT{}, err + } + + t, err := oac.ClientCredsToken(scope) + if err != nil { + return jose.JWT{}, err + } + + jwt, err := jose.ParseJWT(t.IDToken) + if err != nil { + return jose.JWT{}, err + } + + return jwt, c.VerifyJWT(jwt) +} + +// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token. +func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) { + oac, err := c.OAuthClient() + if err != nil { + return jose.JWT{}, err + } + + t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code) + if err != nil { + return jose.JWT{}, err + } + + jwt, err := jose.ParseJWT(t.IDToken) + if err != nil { + return jose.JWT{}, err + } + + return jwt, c.VerifyJWT(jwt) +} + +// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token. +func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) { + oac, err := c.OAuthClient() + if err != nil { + return jose.JWT{}, err + } + + t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken) + if err != nil { + return jose.JWT{}, err + } + + jwt, err := jose.ParseJWT(t.IDToken) + if err != nil { + return jose.JWT{}, err + } + + return jwt, c.VerifyJWT(jwt) +} + +func (c *Client) VerifyJWT(jwt jose.JWT) error { + var keysFunc func() []key.PublicKey + if kID, ok := jwt.KeyID(); ok { + keysFunc = c.keysFuncWithID(kID) + } else { + keysFunc = c.keysFuncAll() + } + + v := NewJWTVerifier( + c.providerConfig.Get().Issuer.String(), + c.credentials.ID, + c.maybeSyncKeys, keysFunc) + + return v.Verify(jwt) +} + +// keysFuncWithID returns a function that retrieves at most unexpired +// public key from the Client that matches the provided ID +func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey { + return func() []key.PublicKey { + c.keySetSyncMutex.RLock() + defer c.keySetSyncMutex.RUnlock() + + if c.keySet.ExpiresAt().Before(time.Now()) { + return []key.PublicKey{} + } + + k := c.keySet.Key(kID) + if k == nil { + return []key.PublicKey{} + } + + return []key.PublicKey{*k} + } +} + +// keysFuncAll returns a function that retrieves all unexpired public +// keys from the Client +func (c *Client) keysFuncAll() func() []key.PublicKey { + return func() []key.PublicKey { + c.keySetSyncMutex.RLock() + defer c.keySetSyncMutex.RUnlock() + + if c.keySet.ExpiresAt().Before(time.Now()) { + return []key.PublicKey{} + } + + return c.keySet.Keys() + } +} + +type providerConfigRepo struct { + mu sync.RWMutex + config ProviderConfig // do not access directly, use Get() +} + +func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo { + return &providerConfigRepo{sync.RWMutex{}, pc} +} + +// returns an error to implement ProviderConfigSetter +func (r *providerConfigRepo) Set(cfg ProviderConfig) error { + r.mu.Lock() + defer r.mu.Unlock() + r.config = cfg + return nil +} + +func (r *providerConfigRepo) Get() ProviderConfig { + r.mu.RLock() + defer r.mu.RUnlock() + return r.config +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/identity.go b/vendor/github.com/coreos/go-oidc/oidc/identity.go new file mode 100644 index 00000000..9bfa8e34 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/identity.go @@ -0,0 +1,44 @@ +package oidc + +import ( + "errors" + "time" + + "github.com/coreos/go-oidc/jose" +) + +type Identity struct { + ID string + Name string + Email string + ExpiresAt time.Time +} + +func IdentityFromClaims(claims jose.Claims) (*Identity, error) { + if claims == nil { + return nil, errors.New("nil claim set") + } + + var ident Identity + var err error + var ok bool + + if ident.ID, 
ok, err = claims.StringClaim("sub"); err != nil { + return nil, err + } else if !ok { + return nil, errors.New("missing required claim: sub") + } + + if ident.Email, _, err = claims.StringClaim("email"); err != nil { + return nil, err + } + + exp, ok, err := claims.TimeClaim("exp") + if err != nil { + return nil, err + } else if ok { + ident.ExpiresAt = exp + } + + return &ident, nil +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/interface.go b/vendor/github.com/coreos/go-oidc/oidc/interface.go new file mode 100644 index 00000000..248cac0b --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/interface.go @@ -0,0 +1,3 @@ +package oidc + +type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error) diff --git a/vendor/github.com/coreos/go-oidc/oidc/key.go b/vendor/github.com/coreos/go-oidc/oidc/key.go new file mode 100755 index 00000000..82a0f567 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/key.go @@ -0,0 +1,67 @@ +package oidc + +import ( + "encoding/json" + "errors" + "net/http" + "time" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" +) + +// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no +// Cache-Control header is provided by the JWK Set document endpoint. +const DefaultPublicKeySetTTL = 24 * time.Hour + +// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document. +func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo { + return &remotePublicKeyRepo{hc: hc, ep: ep} +} + +type remotePublicKeyRepo struct { + hc phttp.Client + ep string +} + +// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL +// is set on the Key Set to avoid it having to be re-retrieved for every +// encryption event. This TTL is typically controlled by the endpoint returning +// a Cache-Control header, but defaults to 24 hours if no Cache-Control header +// is found. +func (r *remotePublicKeyRepo) Get() (key.KeySet, error) { + req, err := http.NewRequest("GET", r.ep, nil) + if err != nil { + return nil, err + } + + resp, err := r.hc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var d struct { + Keys []jose.JWK `json:"keys"` + } + if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { + return nil, err + } + + if len(d.Keys) == 0 { + return nil, errors.New("zero keys in response") + } + + ttl, ok, err := phttp.Cacheable(resp.Header) + if err != nil { + return nil, err + } + if !ok { + ttl = DefaultPublicKeySetTTL + } + + exp := time.Now().UTC().Add(ttl) + ks := key.NewPublicKeySet(d.Keys, exp) + return ks, nil +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/provider.go b/vendor/github.com/coreos/go-oidc/oidc/provider.go new file mode 100644 index 00000000..1235890c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/provider.go @@ -0,0 +1,688 @@ +package oidc + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" + "github.com/coreos/pkg/timeutil" + "github.com/jonboulle/clockwork" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/oauth2" +) + +var ( + log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") +) + +const ( + // Subject Identifier types defined by the OIDC spec. Specifies if the provider + // should provide the same sub claim value to all clients (public) or a unique + // value for each client (pairwise). 
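// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// the key.go file above exposes NewRemotePublicKeyRepo, which fetches a JWK
// Set and stamps it with a TTL derived from the Cache-Control header (or
// DefaultPublicKeySetTTL when none is present). A one-off fetch might look
// like this; the endpoint URL is a placeholder.
//
//	repo := oidc.NewRemotePublicKeyRepo(http.DefaultClient, "https://issuer.example.com/keys")
//	ks, err := repo.Get()
//	if err != nil {
//		// handle fetch/decode failure, or "zero keys in response"
//	}
//	_ = ks // a key.KeySet backed by the fetched keys and their expiry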
+ // + // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes + SubjectTypePublic = "public" + SubjectTypePairwise = "pairwise" +) + +var ( + // Default values for omitted provider config fields. + // + // Use ProviderConfig's Defaults method to fill a provider config with these values. + DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit} + DefaultResponseModesSupported = []string{"query", "fragment"} + DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic} + DefaultClaimTypesSupported = []string{"normal"} +) + +const ( + MaximumProviderConfigSyncInterval = 24 * time.Hour + MinimumProviderConfigSyncInterval = time.Minute + + discoveryConfigPath = "/.well-known/openid-configuration" +) + +// internally configurable for tests +var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval + +var ( + // Ensure ProviderConfig satisfies these interfaces. + _ json.Marshaler = &ProviderConfig{} + _ json.Unmarshaler = &ProviderConfig{} +) + +// ProviderConfig represents the OpenID Provider Metadata specifying what +// configurations a provider supports. +// +// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +type ProviderConfig struct { + Issuer *url.URL // Required + AuthEndpoint *url.URL // Required + TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported + UserInfoEndpoint *url.URL + KeysEndpoint *url.URL // Required + RegistrationEndpoint *url.URL + + // Servers MAY choose not to advertise some supported scope values even when this + // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported. + ScopesSupported []string + // OAuth2.0 response types supported. + ResponseTypesSupported []string // Required + // OAuth2.0 response modes supported. + // + // If omitted, defaults to DefaultResponseModesSupported. + ResponseModesSupported []string + // OAuth2.0 grant types supported. + // + // If omitted, defaults to DefaultGrantTypesSupported. + GrantTypesSupported []string + ACRValuesSupported []string + // SubjectTypesSupported specifies strategies for providing values for the sub claim. + SubjectTypesSupported []string // Required + + // JWA signing and encryption algorith values supported for ID tokens. + IDTokenSigningAlgValues []string // Required + IDTokenEncryptionAlgValues []string + IDTokenEncryptionEncValues []string + + // JWA signing and encryption algorith values supported for user info responses. + UserInfoSigningAlgValues []string + UserInfoEncryptionAlgValues []string + UserInfoEncryptionEncValues []string + + // JWA signing and encryption algorith values supported for request objects. + ReqObjSigningAlgValues []string + ReqObjEncryptionAlgValues []string + ReqObjEncryptionEncValues []string + + TokenEndpointAuthMethodsSupported []string + TokenEndpointAuthSigningAlgValuesSupported []string + DisplayValuesSupported []string + ClaimTypesSupported []string + ClaimsSupported []string + ServiceDocs *url.URL + ClaimsLocalsSupported []string + UILocalsSupported []string + ClaimsParameterSupported bool + RequestParameterSupported bool + RequestURIParamaterSupported bool + RequireRequestURIRegistration bool + + Policy *url.URL + TermsOfService *url.URL + + // Not part of the OpenID Provider Metadata + ExpiresAt time.Time +} + +// Defaults returns a shallow copy of ProviderConfig with default +// values replacing omitted fields. 
+// +// var cfg oidc.ProviderConfig +// // Fill provider config with default values for omitted fields. +// cfg = cfg.Defaults() +// +func (p ProviderConfig) Defaults() ProviderConfig { + setDefault := func(val *[]string, defaultVal []string) { + if len(*val) == 0 { + *val = defaultVal + } + } + setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported) + setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported) + setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported) + setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported) + return p +} + +func (p *ProviderConfig) MarshalJSON() ([]byte, error) { + e := p.toEncodableStruct() + return json.Marshal(&e) +} + +func (p *ProviderConfig) UnmarshalJSON(data []byte) error { + var e encodableProviderConfig + if err := json.Unmarshal(data, &e); err != nil { + return err + } + conf, err := e.toStruct() + if err != nil { + return err + } + if err := conf.Valid(); err != nil { + return err + } + *p = conf + return nil +} + +type encodableProviderConfig struct { + Issuer string `json:"issuer"` + AuthEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"` + KeysEndpoint string `json:"jwks_uri"` + RegistrationEndpoint string `json:"registration_endpoint,omitempty"` + + // Use 'omitempty' for all slices as per OIDC spec: + // "Claims that return multiple values are represented as JSON arrays. + // Claims with zero elements MUST be omitted from the response." + // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse + + ScopesSupported []string `json:"scopes_supported,omitempty"` + ResponseTypesSupported []string `json:"response_types_supported,omitempty"` + ResponseModesSupported []string `json:"response_modes_supported,omitempty"` + GrantTypesSupported []string `json:"grant_types_supported,omitempty"` + ACRValuesSupported []string `json:"acr_values_supported,omitempty"` + SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` + + IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"` + IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"` + IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"` + UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"` + UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"` + UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"` + ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"` + ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"` + ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"` + + TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"` + TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"` + + DisplayValuesSupported []string `json:"display_values_supported,omitempty"` + ClaimTypesSupported []string `json:"claim_types_supported,omitempty"` + ClaimsSupported []string `json:"claims_supported,omitempty"` + ServiceDocs string `json:"service_documentation,omitempty"` + ClaimsLocalsSupported []string 
`json:"claims_locales_supported,omitempty"` + UILocalsSupported []string `json:"ui_locales_supported,omitempty"` + ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"` + RequestParameterSupported bool `json:"request_parameter_supported,omitempty"` + RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"` + RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"` + + Policy string `json:"op_policy_uri,omitempty"` + TermsOfService string `json:"op_tos_uri,omitempty"` +} + +func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig { + return encodableProviderConfig{ + Issuer: uriToString(cfg.Issuer), + AuthEndpoint: uriToString(cfg.AuthEndpoint), + TokenEndpoint: uriToString(cfg.TokenEndpoint), + UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint), + KeysEndpoint: uriToString(cfg.KeysEndpoint), + RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint), + ScopesSupported: cfg.ScopesSupported, + ResponseTypesSupported: cfg.ResponseTypesSupported, + ResponseModesSupported: cfg.ResponseModesSupported, + GrantTypesSupported: cfg.GrantTypesSupported, + ACRValuesSupported: cfg.ACRValuesSupported, + SubjectTypesSupported: cfg.SubjectTypesSupported, + IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues, + IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues, + IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues, + UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues, + UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues, + UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues, + ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues, + ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues, + ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues, + TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported, + TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported, + DisplayValuesSupported: cfg.DisplayValuesSupported, + ClaimTypesSupported: cfg.ClaimTypesSupported, + ClaimsSupported: cfg.ClaimsSupported, + ServiceDocs: uriToString(cfg.ServiceDocs), + ClaimsLocalsSupported: cfg.ClaimsLocalsSupported, + UILocalsSupported: cfg.UILocalsSupported, + ClaimsParameterSupported: cfg.ClaimsParameterSupported, + RequestParameterSupported: cfg.RequestParameterSupported, + RequestURIParamaterSupported: cfg.RequestURIParamaterSupported, + RequireRequestURIRegistration: cfg.RequireRequestURIRegistration, + Policy: uriToString(cfg.Policy), + TermsOfService: uriToString(cfg.TermsOfService), + } +} + +func (e encodableProviderConfig) toStruct() (ProviderConfig, error) { + p := stickyErrParser{} + conf := ProviderConfig{ + Issuer: p.parseURI(e.Issuer, "issuer"), + AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"), + TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"), + UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"), + KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"), + RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"), + ScopesSupported: e.ScopesSupported, + ResponseTypesSupported: e.ResponseTypesSupported, + ResponseModesSupported: e.ResponseModesSupported, + GrantTypesSupported: e.GrantTypesSupported, + ACRValuesSupported: e.ACRValuesSupported, + SubjectTypesSupported: e.SubjectTypesSupported, + IDTokenSigningAlgValues: e.IDTokenSigningAlgValues, + IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues, + IDTokenEncryptionEncValues: 
e.IDTokenEncryptionEncValues, + UserInfoSigningAlgValues: e.UserInfoSigningAlgValues, + UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues, + UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues, + ReqObjSigningAlgValues: e.ReqObjSigningAlgValues, + ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues, + ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues, + TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported, + TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported, + DisplayValuesSupported: e.DisplayValuesSupported, + ClaimTypesSupported: e.ClaimTypesSupported, + ClaimsSupported: e.ClaimsSupported, + ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"), + ClaimsLocalsSupported: e.ClaimsLocalsSupported, + UILocalsSupported: e.UILocalsSupported, + ClaimsParameterSupported: e.ClaimsParameterSupported, + RequestParameterSupported: e.RequestParameterSupported, + RequestURIParamaterSupported: e.RequestURIParamaterSupported, + RequireRequestURIRegistration: e.RequireRequestURIRegistration, + Policy: p.parseURI(e.Policy, "op_policy-uri"), + TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"), + } + if p.firstErr != nil { + return ProviderConfig{}, p.firstErr + } + return conf, nil +} + +// Empty returns if a ProviderConfig holds no information. +// +// This case generally indicates a ProviderConfigGetter has experienced an error +// and has nothing to report. +func (p ProviderConfig) Empty() bool { + return p.Issuer == nil +} + +func contains(sli []string, ele string) bool { + for _, s := range sli { + if s == ele { + return true + } + } + return false +} + +// Valid determines if a ProviderConfig conforms with the OIDC specification. +// If Valid returns successfully it guarantees required field are non-nil and +// URLs are well formed. +// +// Valid is called by UnmarshalJSON. +// +// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for +// URLs fields where the OIDC spec requires it. This may change in future releases +// of this package. 
See: https://github.com/coreos/go-oidc/issues/34 +func (p ProviderConfig) Valid() error { + grantTypes := p.GrantTypesSupported + if len(grantTypes) == 0 { + grantTypes = DefaultGrantTypesSupported + } + implicitOnly := true + for _, grantType := range grantTypes { + if grantType != oauth2.GrantTypeImplicit { + implicitOnly = false + break + } + } + + if len(p.SubjectTypesSupported) == 0 { + return errors.New("missing required field subject_types_supported") + } + if len(p.IDTokenSigningAlgValues) == 0 { + return errors.New("missing required field id_token_signing_alg_values_supported") + } + + if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") { + return errors.New("scoped_supported must be unspecified or include 'openid'") + } + + if !contains(p.IDTokenSigningAlgValues, "RS256") { + return errors.New("id_token_signing_alg_values_supported must include 'RS256'") + } + if contains(p.TokenEndpointAuthMethodsSupported, "none") { + return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'") + } + + uris := []struct { + val *url.URL + name string + required bool + }{ + {p.Issuer, "issuer", true}, + {p.AuthEndpoint, "authorization_endpoint", true}, + {p.TokenEndpoint, "token_endpoint", !implicitOnly}, + {p.UserInfoEndpoint, "userinfo_endpoint", false}, + {p.KeysEndpoint, "jwks_uri", true}, + {p.RegistrationEndpoint, "registration_endpoint", false}, + {p.ServiceDocs, "service_documentation", false}, + {p.Policy, "op_policy_uri", false}, + {p.TermsOfService, "op_tos_uri", false}, + } + + for _, uri := range uris { + if uri.val == nil { + if !uri.required { + continue + } + return fmt.Errorf("empty value for required uri field %s", uri.name) + } + if uri.val.Host == "" { + return fmt.Errorf("no host for uri field %s", uri.name) + } + if uri.val.Scheme != "http" && uri.val.Scheme != "https" { + return fmt.Errorf("uri field %s schemeis not http or https", uri.name) + } + } + return nil +} + +// Supports determines if provider supports a client given their respective metadata. +func (p ProviderConfig) Supports(c ClientMetadata) error { + if err := p.Valid(); err != nil { + return fmt.Errorf("invalid provider config: %v", err) + } + if err := c.Valid(); err != nil { + return fmt.Errorf("invalid client config: %v", err) + } + + // Fill default values for omitted fields + c = c.Defaults() + p = p.Defaults() + + // Do the supported values list the requested one? 
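// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// from a caller's perspective this method answers "can this client be
// registered with this provider?"; providerCfg and clientMeta below are
// placeholder variables.
//
//	if err := providerCfg.Supports(clientMeta); err != nil {
//		// the provider does not advertise support for one of the client's
//		// requested algorithms, grant types or response types
//	}
//
// The checks that follow compare each value the client requested against the
// provider's advertised "*_supported" lists, with empty requests falling back
// to the defaults filled in above.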
+ supports := []struct { + supported []string + requested string + name string + }{ + {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"}, + {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"}, + {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"}, + {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"}, + {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"}, + {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"}, + {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"}, + {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"}, + {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"}, + } + for _, field := range supports { + if field.requested == "" { + continue + } + if !contains(field.supported, field.requested) { + return fmt.Errorf("provider does not support requested value for field %s", field.name) + } + } + + stringsEqual := func(s1, s2 string) bool { return s1 == s2 } + + // For lists, are the list of requested values a subset of the supported ones? + supportsAll := []struct { + supported []string + requested []string + name string + // OAuth2.0 response_type can be space separated lists where order doesn't matter. + // For example "id_token token" is the same as "token id_token" + // Support a custom compare method. + comp func(s1, s2 string) bool + }{ + {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual}, + {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual}, + } + for _, field := range supportsAll { + requestLoop: + for _, req := range field.requested { + for _, sup := range field.supported { + if field.comp(req, sup) { + continue requestLoop + } + } + return fmt.Errorf("provider does not support requested value for field %s", field.name) + } + } + + // TODO(ericchiang): Are there more checks we feel comfortable with begin strict about? 
+ + return nil +} + +func (p ProviderConfig) SupportsGrantType(grantType string) bool { + var supported []string + if len(p.GrantTypesSupported) == 0 { + supported = DefaultGrantTypesSupported + } else { + supported = p.GrantTypesSupported + } + + for _, t := range supported { + if t == grantType { + return true + } + } + return false +} + +type ProviderConfigGetter interface { + Get() (ProviderConfig, error) +} + +type ProviderConfigSetter interface { + Set(ProviderConfig) error +} + +type ProviderConfigSyncer struct { + from ProviderConfigGetter + to ProviderConfigSetter + clock clockwork.Clock + + initialSyncDone bool + initialSyncWait sync.WaitGroup +} + +func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer { + return &ProviderConfigSyncer{ + from: from, + to: to, + clock: clockwork.NewRealClock(), + } +} + +func (s *ProviderConfigSyncer) Run() chan struct{} { + stop := make(chan struct{}) + + var next pcsStepper + next = &pcsStepNext{aft: time.Duration(0)} + + s.initialSyncWait.Add(1) + go func() { + for { + select { + case <-s.clock.After(next.after()): + next = next.step(s.sync) + case <-stop: + return + } + } + }() + + return stop +} + +func (s *ProviderConfigSyncer) WaitUntilInitialSync() { + s.initialSyncWait.Wait() +} + +func (s *ProviderConfigSyncer) sync() (time.Duration, error) { + cfg, err := s.from.Get() + if err != nil { + return 0, err + } + + if err = s.to.Set(cfg); err != nil { + return 0, fmt.Errorf("error setting provider config: %v", err) + } + + if !s.initialSyncDone { + s.initialSyncWait.Done() + s.initialSyncDone = true + } + + log.Infof("Updating provider config: config=%#v", cfg) + + return nextSyncAfter(cfg.ExpiresAt, s.clock), nil +} + +type pcsStepFunc func() (time.Duration, error) + +type pcsStepper interface { + after() time.Duration + step(pcsStepFunc) pcsStepper +} + +type pcsStepNext struct { + aft time.Duration +} + +func (n *pcsStepNext) after() time.Duration { + return n.aft +} + +func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) { + ttl, err := fn() + if err == nil { + next = &pcsStepNext{aft: ttl} + log.Debugf("Synced provider config, next attempt in %v", next.after()) + } else { + next = &pcsStepRetry{aft: time.Second} + log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err) + } + return +} + +type pcsStepRetry struct { + aft time.Duration +} + +func (r *pcsStepRetry) after() time.Duration { + return r.aft +} + +func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) { + ttl, err := fn() + if err == nil { + next = &pcsStepNext{aft: ttl} + log.Infof("Provider config sync no longer failing") + } else { + next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)} + log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err) + } + return +} + +func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration { + if exp.IsZero() { + return MaximumProviderConfigSyncInterval + } + + t := exp.Sub(clock.Now()) / 2 + if t > MaximumProviderConfigSyncInterval { + t = MaximumProviderConfigSyncInterval + } else if t < minimumProviderConfigSyncInterval { + t = minimumProviderConfigSyncInterval + } + + return t +} + +type httpProviderConfigGetter struct { + hc phttp.Client + issuerURL string + clock clockwork.Clock +} + +func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter { + return &httpProviderConfigGetter{ + hc: hc, + issuerURL: issuerURL, + clock: clockwork.NewRealClock(), + } +} + +func 
(r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) { + // If the Issuer value contains a path component, any terminating / MUST be removed before + // appending /.well-known/openid-configuration. + // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest + discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath + req, err := http.NewRequest("GET", discoveryURL, nil) + if err != nil { + return + } + + resp, err := r.hc.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil { + return + } + + var ttl time.Duration + var ok bool + ttl, ok, err = phttp.Cacheable(resp.Header) + if err != nil { + return + } else if ok { + cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl) + } + + // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information. + // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation + if !urlEqual(cfg.Issuer.String(), r.issuerURL) { + err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL) + return + } + + return +} + +func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) { + if hc == nil { + hc = http.DefaultClient + } + + g := NewHTTPProviderConfigGetter(hc, issuerURL) + return g.Get() +} + +func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) { + return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock()) +} + +func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) { + var sleep time.Duration + var err error + for { + pcfg, err = FetchProviderConfig(hc, issuerURL) + if err == nil { + break + } + + sleep = timeutil.ExpBackoff(sleep, time.Minute) + fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err) + time.Sleep(sleep) + } + + return +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/transport.go b/vendor/github.com/coreos/go-oidc/oidc/transport.go new file mode 100644 index 00000000..61c926d7 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/transport.go @@ -0,0 +1,88 @@ +package oidc + +import ( + "fmt" + "net/http" + "sync" + + phttp "github.com/coreos/go-oidc/http" + "github.com/coreos/go-oidc/jose" +) + +type TokenRefresher interface { + // Verify checks if the provided token is currently valid or not. + Verify(jose.JWT) error + + // Refresh attempts to authenticate and retrieve a new token. 
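// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// the two implementations just below compose into an http.RoundTripper that
// keeps a client-credentials JWT fresh. The issuer URL is a placeholder and
// oidcClient is assumed to be a *oidc.Client created earlier.
//
//	hc := &http.Client{Transport: &oidc.AuthenticatedTransport{
//		TokenRefresher: &oidc.ClientCredsTokenRefresher{
//			Issuer:     "https://issuer.example.com",
//			OIDCClient: oidcClient,
//		},
//		RoundTripper: http.DefaultTransport,
//	}}
//	// Every request sent with hc carries "Authorization: Bearer <jwt>",
//	// refreshing the token whenever Verify reports it invalid.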
+ Refresh() (jose.JWT, error) +} + +type ClientCredsTokenRefresher struct { + Issuer string + OIDCClient *Client +} + +func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) { + _, err = VerifyClientClaims(jwt, c.Issuer) + return +} + +func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) { + if err = c.OIDCClient.Healthy(); err != nil { + err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err) + return + } + + jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"}) + if err != nil { + err = fmt.Errorf("unable to verify auth code with issuer: %v", err) + return + } + + return +} + +type AuthenticatedTransport struct { + TokenRefresher + http.RoundTripper + + mu sync.Mutex + jwt jose.JWT +} + +func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.TokenRefresher.Verify(t.jwt) == nil { + return t.jwt, nil + } + + jwt, err := t.TokenRefresher.Refresh() + if err != nil { + return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err) + } + + t.jwt = jwt + return t.jwt, nil +} + +// SetJWT sets the JWT held by the Transport. +// This is useful for cases in which you want to set an initial JWT. +func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) { + t.mu.Lock() + defer t.mu.Unlock() + + t.jwt = jwt +} + +func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) { + jwt, err := t.verifiedJWT() + if err != nil { + return nil, err + } + + req := phttp.CopyRequest(r) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode())) + return t.RoundTripper.RoundTrip(req) +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/util.go b/vendor/github.com/coreos/go-oidc/oidc/util.go new file mode 100644 index 00000000..f2a5a195 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/util.go @@ -0,0 +1,109 @@ +package oidc + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/coreos/go-oidc/jose" +) + +// RequestTokenExtractor funcs extract a raw encoded token from a request. +type RequestTokenExtractor func(r *http.Request) (string, error) + +// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's +// Authorization header. +func ExtractBearerToken(r *http.Request) (string, error) { + ah := r.Header.Get("Authorization") + if ah == "" { + return "", errors.New("missing Authorization header") + } + + if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { + return "", errors.New("should be a bearer token") + } + + val := ah[7:] + if len(val) == 0 { + return "", errors.New("bearer token is empty") + } + + return val, nil +} + +// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request. 
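// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// a typical server-side flow pairs ExtractBearerToken above with the Client's
// VerifyJWT. Here r is assumed to be an *http.Request and client a
// *oidc.Client.
//
//	rawToken, err := oidc.ExtractBearerToken(r)
//	if err != nil {
//		// no usable "Authorization: Bearer ..." header present
//	}
//	jwt, err := jose.ParseJWT(rawToken)
//	if err != nil {
//		// malformed token
//	}
//	if err := client.VerifyJWT(jwt); err != nil {
//		// signature or claim validation failed
//	}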
+func CookieTokenExtractor(cookieName string) RequestTokenExtractor { + return func(r *http.Request) (string, error) { + ck, err := r.Cookie(cookieName) + if err != nil { + return "", fmt.Errorf("token cookie not found in request: %v", err) + } + + if ck.Value == "" { + return "", errors.New("token cookie found but is empty") + } + + return ck.Value, nil + } +} + +func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims { + return jose.Claims{ + // required + "iss": iss, + "sub": sub, + "aud": aud, + "iat": iat.Unix(), + "exp": exp.Unix(), + } +} + +func GenClientID(hostport string) (string, error) { + b, err := randBytes(32) + if err != nil { + return "", err + } + + var host string + if strings.Contains(hostport, ":") { + host, _, err = net.SplitHostPort(hostport) + if err != nil { + return "", err + } + } else { + host = hostport + } + + return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil +} + +func randBytes(n int) ([]byte, error) { + b := make([]byte, n) + got, err := rand.Read(b) + if err != nil { + return nil, err + } else if n != got { + return nil, errors.New("unable to generate enough random data") + } + return b, nil +} + +// urlEqual checks two urls for equality using only the host and path portions. +func urlEqual(url1, url2 string) bool { + u1, err := url.Parse(url1) + if err != nil { + return false + } + u2, err := url.Parse(url2) + if err != nil { + return false + } + + return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) +} diff --git a/vendor/github.com/coreos/go-oidc/oidc/verification.go b/vendor/github.com/coreos/go-oidc/oidc/verification.go new file mode 100644 index 00000000..00241304 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/oidc/verification.go @@ -0,0 +1,188 @@ +package oidc + +import ( + "errors" + "fmt" + "time" + + "github.com/jonboulle/clockwork" + + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/key" +) + +func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) { + jwtBytes := []byte(jwt.Data()) + for _, k := range keys { + v, err := k.Verifier() + if err != nil { + return false, err + } + if v.Verify(jwt.Signature, jwtBytes) == nil { + return true, nil + } + } + return false, nil +} + +// containsString returns true if the given string(needle) is found +// in the string array(haystack). +func containsString(needle string, haystack []string) bool { + for _, v := range haystack { + if v == needle { + return true + } + } + return false +} + +// Verify claims in accordance with OIDC spec +// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation +func VerifyClaims(jwt jose.JWT, issuer, clientID string) error { + now := time.Now().UTC() + + claims, err := jwt.Claims() + if err != nil { + return err + } + + ident, err := IdentityFromClaims(claims) + if err != nil { + return err + } + + if ident.ExpiresAt.Before(now) { + return errors.New("token is expired") + } + + // iss REQUIRED. Issuer Identifier for the Issuer of the response. + // The iss value is a case sensitive URL using the https scheme that contains scheme, + // host, and optionally, port number and path components and no query or fragment components. + if iss, exists := claims["iss"].(string); exists { + if !urlEqual(iss, issuer) { + return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss) + } + } else { + return errors.New("missing claim: 'iss'") + } + + // iat REQUIRED. Time at which the JWT was issued. 
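// Editor's note (illustrative sketch, not part of the vendored go-oidc source):
// the NewClaims helper in util.go above produces a claim set containing
// exactly the required members checked here; the issuer, subject and audience
// values are placeholders.
//
//	now := time.Now().UTC()
//	claims := oidc.NewClaims(
//		"https://issuer.example.com", // iss
//		"user-1234",                  // sub
//		"client-id",                  // aud (string or []string)
//		now,                          // iat
//		now.Add(time.Hour),           // exp
//	)
//	// "iat" and "exp" are stored as Unix seconds via Unix().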
+ // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z + // as measured in UTC until the date/time. + if _, exists := claims["iat"].(float64); !exists { + return errors.New("missing claim: 'iat'") + } + + // aud REQUIRED. Audience(s) that this ID Token is intended for. + // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value. + // It MAY also contain identifiers for other audiences. In the general case, the aud + // value is an array of case sensitive strings. In the common special case when there + // is one audience, the aud value MAY be a single case sensitive string. + if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { + if aud != clientID { + return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID) + } + } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { + if !containsString(clientID, aud) { + return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID) + } + } else { + return errors.New("invalid claim value: 'aud' is required, and should be either string or string array") + } + + return nil +} + +// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT. +// Returns the client ID if valid, or an error if invalid. +func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) { + claims, err := jwt.Claims() + if err != nil { + return "", fmt.Errorf("failed to parse JWT claims: %v", err) + } + + iss, ok, err := claims.StringClaim("iss") + if err != nil { + return "", fmt.Errorf("failed to parse 'iss' claim: %v", err) + } else if !ok { + return "", errors.New("missing required 'iss' claim") + } else if !urlEqual(iss, issuer) { + return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss) + } + + sub, ok, err := claims.StringClaim("sub") + if err != nil { + return "", fmt.Errorf("failed to parse 'sub' claim: %v", err) + } else if !ok { + return "", errors.New("missing required 'sub' claim") + } + + if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { + if aud != sub { + return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub) + } + } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { + if !containsString(sub, aud) { + return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub) + } + } else { + return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array") + } + + now := time.Now().UTC() + exp, ok, err := claims.TimeClaim("exp") + if err != nil { + return "", fmt.Errorf("failed to parse 'exp' claim: %v", err) + } else if !ok { + return "", errors.New("missing required 'exp' claim") + } else if exp.Before(now) { + return "", fmt.Errorf("token already expired at: %v", exp) + } + + return sub, nil +} + +type JWTVerifier struct { + issuer string + clientID string + syncFunc func() error + keysFunc func() []key.PublicKey + clock clockwork.Clock +} + +func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier { + return JWTVerifier{ + issuer: issuer, + clientID: clientID, + syncFunc: syncFunc, + keysFunc: keysFunc, + clock: clockwork.NewRealClock(), + } +} + +func (v *JWTVerifier) Verify(jwt jose.JWT) error { + ok, err := VerifySignature(jwt, v.keysFunc()) + if ok { + goto SignatureVerified + } else if 
err != nil { + return fmt.Errorf("oidc: JWT signature verification failed: %v", err) + } + + if err = v.syncFunc(); err != nil { + return fmt.Errorf("oidc: failed syncing KeySet: %v", err) + } + + ok, err = VerifySignature(jwt, v.keysFunc()) + if err != nil { + return fmt.Errorf("oidc: JWT signature verification failed: %v", err) + } else if !ok { + return errors.New("oidc: unable to verify JWT signature: no matching keys") + } + +SignatureVerified: + if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil { + return fmt.Errorf("oidc: JWT claims invalid: %v", err) + } + + return nil +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 00000000..7f434990 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,179 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true if the local systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. 
+func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. (from the docs) + */ + + valid := name[0] != '_' + for _, c := range name { + valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' + } + return valid +} + +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok { + return false + } + + sysErr, ok := opErr.Err.(syscall.Errno) + if !ok { + return false + } + + return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS +} + +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +func journalError(s string) error { + s = "journal error: " + s + fmt.Fprintln(os.Stderr, s) + return errors.New(s) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
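Stepping back to the go-systemd `journal` package vendored a few files above: its package and `Send` doc comments spell out the socket availability check and the field-name rules for the `vars` map, and a short usage sketch may make them concrete. This is an illustration only, not part of the vendored files; the unit name and field value are placeholders.

```go
package main

import (
	"log"

	"github.com/coreos/go-systemd/journal"
)

func main() {
	// Fall back to plain stderr logging when journald's socket is unavailable.
	if !journal.Enabled() {
		log.Println("journald not available, skipping structured logging")
		return
	}

	// Field names must use uppercase letters, digits, and underscores,
	// and must not start with an underscore. UNIT_NAME is a placeholder.
	vars := map[string]string{"UNIT_NAME": "example.service"}
	if err := journal.Send("service started", journal.PriInfo, vars); err != nil {
		log.Printf("journal send failed: %v", err)
	}
}
```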
+ diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 00000000..81efb1fb --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. + +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. + +##### Log levels have specific meanings: + + * Critical: Unrecoverable. Must fail. + * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost + * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. + * Notice: Normal, but important (uncommon) log information. + * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. + * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. + * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. 
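The README above describes the intended division of labor: libraries only declare package loggers, while `main` turns logging on, picks a formatter, and sets levels. As a hedged illustration only, not part of the vendored tree, here is a minimal sketch of that wiring using the capnslog API added in this patch. The repository and package names are placeholders, and in a real project the package logger would live in the library package rather than in `main`.

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// A library package declares one logger, keyed by repo and package name.
// "example.com/myproj" and "server" are illustrative placeholders.
var plog = capnslog.NewPackageLogger("example.com/myproj", "server")

func main() {
	// main owns log configuration: choose a formatter and a global level.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	// Optionally raise verbosity for selected packages in this repo,
	// e.g. from a "pkg=level" style command-line flag.
	repoLog := capnslog.MustRepoLogger("example.com/myproj")
	cfg, err := repoLog.ParseLogLevelConfig("server=DEBUG")
	if err != nil {
		plog.Fatalf("bad log level config: %v", err)
	}
	repoLog.SetLogLevel(cfg)

	plog.Infof("listening on %s", ":8080")
	plog.Debugf("debug output is now visible for the server package")
}
```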
+ diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 00000000..b305a845 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) 
+ prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. +func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 00000000..426603ef --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" 
+ line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 00000000..44b8cd36 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "io" + "os" + "syscall" +) + +// Here's where the opinionation comes in. We need some sensible defaults, +// especially after taking over the log package. Your project (whatever it may +// be) may see things differently. That's okay; there should be no defaults in +// the main package that cannot be controlled or overridden programatically, +// otherwise it's a bug. Doing so is creating your own init_log.go file much +// like this one. + +func init() { + initHijack() + + // Go `log` pacakge uses os.Stderr. + SetFormatter(NewDefaultFormatter(os.Stderr)) + SetGlobalLogLevel(INFO) +} + +func NewDefaultFormatter(out io.Writer) Formatter { + if syscall.Getppid() == 1 { + // We're running under init, which may be systemd. + f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 00000000..45530506 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 00000000..72e05207 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 00000000..970086b9 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 00000000..84954488 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. + CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. 
+func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. +func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. 
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 00000000..612d55c6 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,177 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) 
+} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) +} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 00000000..4be5a1f2 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md new file mode 100644 index 00000000..5ec34c21 --- /dev/null +++ b/vendor/github.com/coreos/pkg/health/README.md @@ -0,0 +1,11 @@ +health +==== + +A simple framework for implementing an HTTP health check endpoint on servers. 
+ +Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`. + +### Documentation + +For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health) + diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go new file mode 100644 index 00000000..a1c3610f --- /dev/null +++ b/vendor/github.com/coreos/pkg/health/health.go @@ -0,0 +1,127 @@ +package health + +import ( + "expvar" + "fmt" + "log" + "net/http" + + "github.com/coreos/pkg/httputil" +) + +// Checkables should return nil when the thing they are checking is healthy, and an error otherwise. +type Checkable interface { + Healthy() error +} + +// Checker provides a way to make an endpoint which can be probed for system health. +type Checker struct { + // Checks are the Checkables to be checked when probing. + Checks []Checkable + + // Unhealthyhandler is called when one or more of the checks are unhealthy. + // If not provided DefaultUnhealthyHandler is called. + UnhealthyHandler UnhealthyHandler + + // HealthyHandler is called when all checks are healthy. + // If not provided, DefaultHealthyHandler is called. + HealthyHandler http.HandlerFunc +} + +func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) { + unhealthyHandler := c.UnhealthyHandler + if unhealthyHandler == nil { + unhealthyHandler = DefaultUnhealthyHandler + } + + successHandler := c.HealthyHandler + if successHandler == nil { + successHandler = DefaultHealthyHandler + } + + if r.Method != "GET" { + w.Header().Set("Allow", "GET") + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + if err := Check(c.Checks); err != nil { + unhealthyHandler(w, r, err) + return + } + + successHandler(w, r) +} + +type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error) + +type StatusResponse struct { + Status string `json:"status"` + Details *StatusResponseDetails `json:"details,omitempty"` +} + +type StatusResponseDetails struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func Check(checks []Checkable) (err error) { + errs := []error{} + for _, c := range checks { + if e := c.Healthy(); e != nil { + errs = append(errs, e) + } + } + + switch len(errs) { + case 0: + err = nil + case 1: + err = errs[0] + default: + err = fmt.Errorf("multiple health check failure: %v", errs) + } + + return +} + +func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) { + err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{ + Status: "ok", + }) + if err != nil { + // TODO(bobbyrullo): replace with logging from new logging pkg, + // once it lands. + log.Printf("Failed to write JSON response: %v", err) + } +} + +func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) { + writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{ + Status: "error", + Details: &StatusResponseDetails{ + Code: http.StatusInternalServerError, + Message: err.Error(), + }, + }) + if writeErr != nil { + // TODO(bobbyrullo): replace with logging from new logging pkg, + // once it lands. + log.Printf("Failed to write JSON response: %v", err) + } +} + +// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported. 
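+// It writes every published expvar variable as a single JSON object, which is
+// useful for exposing process metrics (for example on /debug/vars) alongside
+// the health endpoint.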
+func ExpvarHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md new file mode 100644 index 00000000..44fa751c --- /dev/null +++ b/vendor/github.com/coreos/pkg/httputil/README.md @@ -0,0 +1,13 @@ +httputil +==== + +Common code for dealing with HTTP. + +Includes: + +* Code for returning JSON responses. + +### Documentation + +Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) + diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go new file mode 100644 index 00000000..c37a37bb --- /dev/null +++ b/vendor/github.com/coreos/pkg/httputil/cookie.go @@ -0,0 +1,21 @@ +package httputil + +import ( + "net/http" + "time" +) + +// DeleteCookies effectively deletes all named cookies +// by wiping all data and setting to expire immediately. +func DeleteCookies(w http.ResponseWriter, cookieNames ...string) { + for _, n := range cookieNames { + c := &http.Cookie{ + Name: n, + Value: "", + Path: "/", + MaxAge: -1, + Expires: time.Time{}, + } + http.SetCookie(w, c) + } +} diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go new file mode 100644 index 00000000..0b092350 --- /dev/null +++ b/vendor/github.com/coreos/pkg/httputil/json.go @@ -0,0 +1,27 @@ +package httputil + +import ( + "encoding/json" + "net/http" +) + +const ( + JSONContentType = "application/json" +) + +func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { + enc, err := json.Marshal(resp) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return err + } + + w.Header().Set("Content-Type", JSONContentType) + w.WriteHeader(code) + + _, err = w.Write(enc) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go new file mode 100644 index 00000000..b34fb496 --- /dev/null +++ b/vendor/github.com/coreos/pkg/timeutil/backoff.go @@ -0,0 +1,15 @@ +package timeutil + +import ( + "time" +) + +func ExpBackoff(prev, max time.Duration) time.Duration { + if prev == 0 { + return time.Second + } + if prev > max/2 { + return max + } + return 2 * prev +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 00000000..ed5cb244 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.4 + - 1.3 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 00000000..0eb9b72d --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 00000000..d0d826ba --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,29 @@ +## JSON-Patch + +Provides the ability to modify and test a JSON according to a +[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). 
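+
+As a self-contained sketch of the calls listed under "API Usage" below (the document and the patch are example data only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24}`)
+	// An RFC 6902 patch: replace one member and remove another.
+	patchJSON := []byte(`[
+		{"op": "replace", "path": "/name", "value": "Jane"},
+		{"op": "remove", "path": "/age"}
+	]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply(original)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(string(modified)) // {"name":"Jane"}
+}
+```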
+ +*Version*: **1.0** + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) + +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) + +### API Usage + +* Given a `[]byte`, obtain a Patch object + + `obj, err := jsonpatch.DecodePatch(patch)` + +* Apply the patch and get a new document back + + `out, err := obj.Apply(doc)` + +* Create a JSON Merge Patch document based on two json documents (a to b): + + `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` + +* Bonus API: compare documents for structural equality + + `jsonpatch.Equal(doca, docb)` + diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 00000000..330b9b52 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,306 @@ +package jsonpatch + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func merge(cur, patch *lazyNode) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc) + + return cur +} + +func mergeDocs(doc, patch *partialDoc) { + for k, v := range *patch { + k := decodePatchKey(k) + if v == nil { + delete(*doc, k) + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + doc = pruneDocNulls(patch) + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch) + } + + return json.Marshal(doc) +} + +// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +// +// 'a' is original, 'b' is the modified document. 
Both are to be given as json encoded content. +// The function will return a mergeable json document with differences from a to b. +// +// An error will be returned if any of the two documents are invalid. +func CreateMergePatch(a, b []byte) ([]byte, error) { + aI := map[string]interface{}{} + bI := map[string]interface{}{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + dest, err := getDiff(aI, bI) + if err != nil { + return nil, err + } + return json.Marshal(dest) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. +func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + escapedKey := encodePatchKey(key) + av, ok := a[key] + // value was added + if !ok { + into[escapedKey] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[escapedKey] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[escapedKey] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[escapedKey] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[escapedKey] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[escapedKey] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
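+//
+// For example, a member name such as "a/b~c" appears in a patch key as
+// "a~1b~0c"; encodePatchKey performs that escaping and decodePatchKey
+// reverses it.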
+ +var ( + rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} + +func encodePatchKey(k string) string { + return rfc6901Encoder.Replace(k) +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 00000000..8988da54 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,579 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. +type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + 
return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *partialDoc, path string) (container, string) { + doc := container(pd) + + split := strings.Split(path, "/") + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistant key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + fmt.Printf("huh?: %#v[%d] %s, %s\n", ary, idx) + } + + ary[idx] = val + + *d = ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to remove invalid index: %d", idx) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *partialDoc, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *partialDoc, op 
operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *partialDoc, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + return con.set(key, op.value()) +} + +func (p Patch) move(doc *partialDoc, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *partialDoc, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } else { + return fmt.Errorf("Testing value %s failed", path) + } + } + + if val.equal(op.value()) { + return nil + } + + return fmt.Errorf("Testing value %s failed", path) +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + pd := &partialDoc{} + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(pd, op) + case "remove": + err = p.remove(pd, op) + case "replace": + err = p.replace(pd, op) + case "move": + err = p.move(pd, op) + case "test": + err = p.test(pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 00000000..cdfe2991 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,121 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specificies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..1b1b1921 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 00000000..f1f06564 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
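+
+# The targets below install the proto package and regenerate the test
+# protobufs; generate-test-pbs requires protoc and the protoc-gen-go plugin.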
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 00000000..e98ddec9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,223 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
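+//
+// As an illustrative sketch (Example is an assumed generated message type with
+// an optional Name field and a repeated Tags field):
+//
+//	dst := &Example{Tags: []string{"a"}}
+//	proto.Merge(dst, &Example{Name: proto.String("x"), Tags: []string{"b"}})
+//	// dst.Name now points at "x" and dst.Tags is []string{"a", "b"}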
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
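+			// (Appending to a non-nil empty slice keeps the result non-nil even
+			// when in.Bytes() is itself empty.)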
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 00000000..5810782f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,867 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
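+// Generated oneof unmarshalers use it to signal that a tag/wire-type pair did
+// not match the oneof field being decoded; Unmarshal rewrites it into a more
+// descriptive per-message error before returning.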
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
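+//
+// For example, the wire bytes 0x03 'f' 'o' 'o' decode to the three-byte
+// payload "foo": the leading varint gives the length and the bytes that
+// follow are the data. When alloc is false the returned slice aliases the
+// Buffer's internal storage rather than copying it.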
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. 
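+	// Types with hand-written or generated Unmarshal methods skip the
+	// reflection-based decoder below.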
+ if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. 
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 00000000..231b0740 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1325 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. 
If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. 
+// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. 
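Another illustrative sketch, not part of the vendored file: the Buffer primitives defined above can be exercised directly. A Buffer encodes by appending to its internal slice and decodes starting at index 0, so a single Buffer can write values and then read them back in order:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.EncodeVarint(300)                 // plain varint, as used for int32/int64/uint32/uint64/bool/enum
	b.EncodeZigzag64(uint64(int64(-3))) // sint64 wire form: zigzag maps -3 to 5

	u, _ := b.DecodeVarint()
	z, _ := b.DecodeZigzag64()
	fmt.Println(u, int64(z)) // 300 -3
}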
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) 
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) 
+ } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 00000000..f5db1def --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,276 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
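To make the Equal contract described above concrete, a small sketch that is not part of the vendored file; the Example type and its tags are invented for illustration and stand in for generated code:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Example struct {
	Name             *string `protobuf:"bytes,1,opt,name=name"`
	XXX_unrecognized []byte
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	a := &Example{Name: proto.String("x")}
	b := &Example{Name: proto.String("x")}
	c := &Example{} // Name unset

	fmt.Println(proto.Equal(a, b))          // true: same type, corresponding fields equal
	fmt.Println(proto.Equal(a, c))          // false: set field vs. unset field
	fmt.Println(proto.Equal(c, &Example{})) // true: two unset fields are equal
}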
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. 
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000..054f4f1d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,399 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. 
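The extension machinery that follows (ExtensionRange, ExtensionDesc, HasExtension, GetExtension, and the SetExtension helper defined later in this file) is normally driven by generated code. A hedged sketch, not part of the vendored file, with an invented Base message and E_Nickname descriptor, both assumptions for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Base mimics a generated extendable message: it carries an extension map
// and declares the tag range reserved for extensions.
type Base struct {
	XXX_extensions   map[int32]proto.Extension
	XXX_unrecognized []byte
}

func (m *Base) Reset()         { *m = Base{} }
func (m *Base) String() string { return proto.CompactTextString(m) }
func (*Base) ProtoMessage()    {}

func (*Base) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 100, End: 200}}
}

func (m *Base) ExtensionMap() map[int32]proto.Extension {
	if m.XXX_extensions == nil {
		m.XXX_extensions = make(map[int32]proto.Extension)
	}
	return m.XXX_extensions
}

// E_Nickname describes a string extension of Base at field number 100.
var E_Nickname = &proto.ExtensionDesc{
	ExtendedType:  (*Base)(nil),
	ExtensionType: (*string)(nil),
	Field:         100,
	Name:          "example.nickname",
	Tag:           "bytes,100,opt,name=nickname",
}

func main() {
	m := &Base{}
	proto.SetExtension(m, E_Nickname, proto.String("nick"))
	fmt.Println(proto.HasExtension(m, E_Nickname)) // true

	v, _ := proto.GetExtension(m, E_Nickname)
	fmt.Println(*v.(*string)) // nick
}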
+type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. 
+ return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. 
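HasExtension, ClearExtension, GetExtension and SetExtension above form the user-facing extension API; RegisterExtension below is called from generated code to record each descriptor by field number. A hedged sketch of typical use, where pb.Base is a hypothetical generated message declaring an extension range and pb.E_Name a hypothetical generated *ExtensionDesc for an optional string extension:

	msg := &pb.Base{}

	if err := proto.SetExtension(msg, pb.E_Name, proto.String("fred")); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(msg, pb.E_Name) {
		v, err := proto.GetExtension(msg, pb.E_Name) // interface{} holding a *string
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*v.(*string)) // fred
	}

	proto.ClearExtension(msg, pb.E_Name)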
+func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 00000000..0de8f8df --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,894 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. 
+ - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m 
*Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. 
+ bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
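buildDefaultMessage above and fieldDefault below do the one-time parsing of each field's default tag into a scalarField, which setDefaults then applies; callers reach all of this through proto.SetDefaults. A small sketch against the Test message from the lib.go package documentation earlier in this patch, whose type field is declared with [default=77] (pb is the hypothetical generated package from that example):

	t := &pb.Test{Label: proto.String("hello")}

	proto.SetDefaults(t)

	fmt.Println(*t.Type)  // 77: the field was unset, so it now points at the declared default
	fmt.Println(*t.Label) // hello: already set, left untouched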
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 00000000..e25e01e6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,280 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
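mapKeys and mapKeySorter above give map keys a deterministic order by wrapping a []reflect.Value in a sort.Interface, switching to numeric comparison when the keys are integers. The same pattern on a toy map outside this package (byInt is an illustrative stand-in for mapKeySorter; imports assumed: fmt, reflect, sort):

	// byInt mirrors mapKeySorter with the numeric Less that mapKeys picks for int32/int64 keys.
	type byInt []reflect.Value

	func (s byInt) Len() int           { return len(s) }
	func (s byInt) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
	func (s byInt) Less(i, j int) bool { return s[i].Int() < s[j].Int() }

	func sortedKeysDemo() {
		m := map[int32]string{3: "c", 1: "a", 2: "b"}
		keys := reflect.ValueOf(m).MapKeys()
		sort.Sort(byInt(keys))
		for _, k := range keys {
			fmt.Println(k.Int(), m[int32(k.Int())]) // 1 a, then 2 b, then 3 c
		}
	}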
+var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
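MarshalMessageSet above strips the tag and length varints from each encoded extension, and UnmarshalMessageSet below rebuilds them with the package-level EncodeVarint and the WireBytes wire type before storing the item back into the extension map. The varint arithmetic it relies on looks like this (field number 100 is arbitrary, chosen only for the sketch):

	// Key (tag) for field 100 with the length-delimited wire type: (100<<3)|WireBytes.
	tag := proto.EncodeVarint(uint64(100)<<3 | proto.WireBytes)
	fmt.Printf("% x\n", tag) // a2 06

	// DecodeVarint reverses it and also reports how many bytes it consumed.
	key, n := proto.DecodeVarint(tag)
	fmt.Println(key>>3, key&7, n) // 100 2 2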
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..749919d2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. 
+ if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..e9be0fe9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000..d4531c05 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,842 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. 
+ */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. 
+ OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
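// As a concrete illustration (the field name here is hypothetical, not from
// this package): a generated message field such as
//
//	Name *string `protobuf:"bytes,1,opt,name=name"`
//
// is parsed by Properties.Parse above into Wire="bytes", Tag=1,
// Optional=true and OrigName="name"; setEncAndDec below then selects the
// encoder, decoder and sizer matching the field's Go type (here
// (*Buffer).enc_string / dec_string / size_string, since it is a *string).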
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. 
+ vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. 
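// For example, protoc-gen-go output typically registers its message and enum
// types from an init function along these lines (names are illustrative):
//
//	func init() {
//		proto.RegisterType((*Foo)(nil), "example.Foo")
//		proto.RegisterEnum("example.Color", Color_name, Color_value)
//	}
//
// RegisterEnum and RegisterType are defined just below.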
+ +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 00000000..2336b144 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,751 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. 
+ // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. 
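// Typical use of this file (sketch; msg is any value implementing Message):
//
//	var buf bytes.Buffer
//	if err := proto.MarshalText(&buf, msg); err != nil {
//		// handle error
//	}
//	oneLine := proto.CompactTextString(msg)
//
// MarshalText and CompactTextString are defined at the end of this file;
// scalar fields are written as `name: value` lines and nested messages are
// wrapped in <...> (or {...} for groups) by writeStruct/writeAny above.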
+func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. 
+ m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 00000000..45132326 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,806 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + 
return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/google/cadvisor/LICENSE b/vendor/github.com/google/cadvisor/LICENSE new file mode 100644 index 00000000..97cec18e --- /dev/null +++ b/vendor/github.com/google/cadvisor/LICENSE @@ -0,0 +1,190 @@ + Copyright 2014 The cAdvisor Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/google/cadvisor/info/v1/container.go b/vendor/github.com/google/cadvisor/info/v1/container.go new file mode 100644 index 00000000..6e7e6589 --- /dev/null +++ b/vendor/github.com/google/cadvisor/info/v1/container.go @@ -0,0 +1,583 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "reflect" + "time" +) + +type CpuSpec struct { + Limit uint64 `json:"limit"` + MaxLimit uint64 `json:"max_limit"` + Mask string `json:"mask,omitempty"` + Quota uint64 `json:"quota,omitempty"` + Period uint64 `json:"period,omitempty"` +} + +type MemorySpec struct { + // The amount of memory requested. Default is unlimited (-1). + // Units: bytes. + Limit uint64 `json:"limit,omitempty"` + + // The amount of guaranteed memory. Default is 0. + // Units: bytes. + Reservation uint64 `json:"reservation,omitempty"` + + // The amount of swap space requested. Default is unlimited (-1). + // Units: bytes. + SwapLimit uint64 `json:"swap_limit,omitempty"` +} + +type ContainerSpec struct { + // Time at which the container was created. + CreationTime time.Time `json:"creation_time,omitempty"` + + // Metadata labels associated with this container. + Labels map[string]string `json:"labels,omitempty"` + // Metadata envs associated with this container. Only whitelisted envs are added. 
+ Envs map[string]string `json:"envs,omitempty"` + + HasCpu bool `json:"has_cpu"` + Cpu CpuSpec `json:"cpu,omitempty"` + + HasMemory bool `json:"has_memory"` + Memory MemorySpec `json:"memory,omitempty"` + + HasNetwork bool `json:"has_network"` + + HasFilesystem bool `json:"has_filesystem"` + + // HasDiskIo when true, indicates that DiskIo stats will be available. + HasDiskIo bool `json:"has_diskio"` + + HasCustomMetrics bool `json:"has_custom_metrics"` + CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"` + + // Image name used for this container. + Image string `json:"image,omitempty"` +} + +// Container reference contains enough information to uniquely identify a container +type ContainerReference struct { + // The container id + Id string `json:"id,omitempty"` + + // The absolute name of the container. This is unique on the machine. + Name string `json:"name"` + + // Other names by which the container is known within a certain namespace. + // This is unique within that namespace. + Aliases []string `json:"aliases,omitempty"` + + // Namespace under which the aliases of a container are unique. + // An example of a namespace is "docker" for Docker containers. + Namespace string `json:"namespace,omitempty"` + + Labels map[string]string `json:"labels,omitempty"` +} + +// Sorts by container name. +type ContainerReferenceSlice []ContainerReference + +func (self ContainerReferenceSlice) Len() int { return len(self) } +func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] } +func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name } + +// ContainerInfoRequest is used when users check a container info from the REST API. +// It specifies how much data users want to get about a container +type ContainerInfoRequest struct { + // Max number of stats to return. Specify -1 for all stats currently available. + // Default: 60 + NumStats int `json:"num_stats,omitempty"` + + // Start time for which to query information. + // If ommitted, the beginning of time is assumed. + Start time.Time `json:"start,omitempty"` + + // End time for which to query information. + // If ommitted, current time is assumed. + End time.Time `json:"end,omitempty"` +} + +// Returns a ContainerInfoRequest with all default values specified. +func DefaultContainerInfoRequest() ContainerInfoRequest { + return ContainerInfoRequest{ + NumStats: 60, + } +} + +func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool { + return self.NumStats == other.NumStats && + self.Start.Equal(other.Start) && + self.End.Equal(other.End) +} + +type ContainerInfo struct { + ContainerReference + + // The direct subcontainers of the current container. + Subcontainers []ContainerReference `json:"subcontainers,omitempty"` + + // The isolation used in the container. + Spec ContainerSpec `json:"spec,omitempty"` + + // Historical statistics gathered from the container. + Stats []*ContainerStats `json:"stats,omitempty"` +} + +// TODO(vmarmol): Refactor to not need this equality comparison. +// ContainerInfo may be (un)marshaled by json or other en/decoder. In that +// case, the Timestamp field in each stats/sample may not be precisely +// en/decoded. This will lead to small but acceptable differences between a +// ContainerInfo and its encode-then-decode version. Eq() is used to compare +// two ContainerInfo accepting small difference (<10ms) of Time fields. 
+func (self *ContainerInfo) Eq(b *ContainerInfo) bool { + + // If both self and b are nil, then Eq() returns true + if self == nil { + return b == nil + } + if b == nil { + return self == nil + } + + // For fields other than time.Time, we will compare them precisely. + // This would require that any slice should have same order. + if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) { + return false + } + if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) { + return false + } + if !self.Spec.Eq(&b.Spec) { + return false + } + + for i, expectedStats := range b.Stats { + selfStats := self.Stats[i] + if !expectedStats.Eq(selfStats) { + return false + } + } + + return true +} + +func (self *ContainerSpec) Eq(b *ContainerSpec) bool { + // Creation within 1s of each other. + diff := self.CreationTime.Sub(b.CreationTime) + if (diff > time.Second) || (diff < -time.Second) { + return false + } + + if self.HasCpu != b.HasCpu { + return false + } + if !reflect.DeepEqual(self.Cpu, b.Cpu) { + return false + } + if self.HasMemory != b.HasMemory { + return false + } + if !reflect.DeepEqual(self.Memory, b.Memory) { + return false + } + if self.HasNetwork != b.HasNetwork { + return false + } + if self.HasFilesystem != b.HasFilesystem { + return false + } + if self.HasDiskIo != b.HasDiskIo { + return false + } + if self.HasCustomMetrics != b.HasCustomMetrics { + return false + } + return true +} + +func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats { + n := len(self.Stats) + 1 + for i, s := range self.Stats { + if s.Timestamp.After(ref) { + n = i + break + } + } + if n > len(self.Stats) { + return nil + } + return self.Stats[n:] +} + +func (self *ContainerInfo) StatsStartTime() time.Time { + var ret time.Time + for _, s := range self.Stats { + if s.Timestamp.Before(ret) || ret.IsZero() { + ret = s.Timestamp + } + } + return ret +} + +func (self *ContainerInfo) StatsEndTime() time.Time { + var ret time.Time + for i := len(self.Stats) - 1; i >= 0; i-- { + s := self.Stats[i] + if s.Timestamp.After(ret) { + ret = s.Timestamp + } + } + return ret +} + +// This mirrors kernel internal structure. +type LoadStats struct { + // Number of sleeping tasks. + NrSleeping uint64 `json:"nr_sleeping"` + + // Number of running tasks. + NrRunning uint64 `json:"nr_running"` + + // Number of tasks in stopped state + NrStopped uint64 `json:"nr_stopped"` + + // Number of tasks in uninterruptible state + NrUninterruptible uint64 `json:"nr_uninterruptible"` + + // Number of tasks waiting on IO + NrIoWait uint64 `json:"nr_io_wait"` +} + +// CPU usage time statistics. +type CpuUsage struct { + // Total CPU usage. + // Units: nanoseconds + Total uint64 `json:"total"` + + // Per CPU/core usage of the container. + // Unit: nanoseconds. + PerCpu []uint64 `json:"per_cpu_usage,omitempty"` + + // Time spent in user space. + // Unit: nanoseconds + User uint64 `json:"user"` + + // Time spent in kernel space. + // Unit: nanoseconds + System uint64 `json:"system"` +} + +// All CPU usage metrics are cumulative from the creation of the container +type CpuStats struct { + Usage CpuUsage `json:"usage"` + // Smoothed average of number of runnable threads x 1000. + // We multiply by thousand to avoid using floats, but preserving precision. + // Load is smoothed over the last 10 seconds. Instantaneous value can be read + // from LoadStats.NrRunning. 
+ LoadAverage int32 `json:"load_average"` +} + +type PerDiskStats struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Stats map[string]uint64 `json:"stats"` +} + +type DiskIoStats struct { + IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"` + IoServiced []PerDiskStats `json:"io_serviced,omitempty"` + IoQueued []PerDiskStats `json:"io_queued,omitempty"` + Sectors []PerDiskStats `json:"sectors,omitempty"` + IoServiceTime []PerDiskStats `json:"io_service_time,omitempty"` + IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"` + IoMerged []PerDiskStats `json:"io_merged,omitempty"` + IoTime []PerDiskStats `json:"io_time,omitempty"` +} + +type MemoryStats struct { + // Current memory usage, this includes all memory regardless of when it was + // accessed. + // Units: Bytes. + Usage uint64 `json:"usage"` + + // Number of bytes of page cache memory. + // Units: Bytes. + Cache uint64 `json:"cache"` + + // The amount of anonymous and swap cache memory (includes transparent + // hugepages). + // Units: Bytes. + RSS uint64 `json:"rss"` + + // The amount of working set memory, this includes recently accessed memory, + // dirty memory, and kernel memory. Working set is <= "usage". + // Units: Bytes. + WorkingSet uint64 `json:"working_set"` + + Failcnt uint64 `json:"failcnt"` + + ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"` + HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"` +} + +type MemoryStatsMemoryData struct { + Pgfault uint64 `json:"pgfault"` + Pgmajfault uint64 `json:"pgmajfault"` +} + +type InterfaceStats struct { + // The name of the interface. + Name string `json:"name"` + // Cumulative count of bytes received. + RxBytes uint64 `json:"rx_bytes"` + // Cumulative count of packets received. + RxPackets uint64 `json:"rx_packets"` + // Cumulative count of receive errors encountered. + RxErrors uint64 `json:"rx_errors"` + // Cumulative count of packets dropped while receiving. + RxDropped uint64 `json:"rx_dropped"` + // Cumulative count of bytes transmitted. + TxBytes uint64 `json:"tx_bytes"` + // Cumulative count of packets transmitted. + TxPackets uint64 `json:"tx_packets"` + // Cumulative count of transmit errors encountered. + TxErrors uint64 `json:"tx_errors"` + // Cumulative count of packets dropped while transmitting. + TxDropped uint64 `json:"tx_dropped"` +} + +type NetworkStats struct { + InterfaceStats `json:",inline"` + Interfaces []InterfaceStats `json:"interfaces,omitempty"` + // TCP connection stats (Established, Listen...) + Tcp TcpStat `json:"tcp"` + // TCP6 connection stats (Established, Listen...) + Tcp6 TcpStat `json:"tcp6"` +} + +type TcpStat struct { + //Count of TCP connections in state "Established" + Established uint64 + //Count of TCP connections in state "Syn_Sent" + SynSent uint64 + //Count of TCP connections in state "Syn_Recv" + SynRecv uint64 + //Count of TCP connections in state "Fin_Wait1" + FinWait1 uint64 + //Count of TCP connections in state "Fin_Wait2" + FinWait2 uint64 + //Count of TCP connections in state "Time_Wait + TimeWait uint64 + //Count of TCP connections in state "Close" + Close uint64 + //Count of TCP connections in state "Close_Wait" + CloseWait uint64 + //Count of TCP connections in state "Listen_Ack" + LastAck uint64 + //Count of TCP connections in state "Listen" + Listen uint64 + //Count of TCP connections in state "Closing" + Closing uint64 +} + +type FsStats struct { + // The block device name associated with the filesystem. 
+ Device string `json:"device,omitempty"` + + // Type of the filesytem. + Type string `json:"type"` + + // Number of bytes that can be consumed by the container on this filesystem. + Limit uint64 `json:"capacity"` + + // Number of bytes that is consumed by the container on this filesystem. + Usage uint64 `json:"usage"` + + // Base Usage that is consumed by the container's writable layer. + // This field is only applicable for docker container's as of now. + BaseUsage uint64 `json:"base_usage"` + + // Number of bytes available for non-root user. + Available uint64 `json:"available"` + + // Number of available Inodes + InodesFree uint64 `json:"inodes_free"` + + // Number of reads completed + // This is the total number of reads completed successfully. + ReadsCompleted uint64 `json:"reads_completed"` + + // Number of reads merged + // Reads and writes which are adjacent to each other may be merged for + // efficiency. Thus two 4K reads may become one 8K read before it is + // ultimately handed to the disk, and so it will be counted (and queued) + // as only one I/O. This field lets you know how often this was done. + ReadsMerged uint64 `json:"reads_merged"` + + // Number of sectors read + // This is the total number of sectors read successfully. + SectorsRead uint64 `json:"sectors_read"` + + // Number of milliseconds spent reading + // This is the total number of milliseconds spent by all reads (as + // measured from __make_request() to end_that_request_last()). + ReadTime uint64 `json:"read_time"` + + // Number of writes completed + // This is the total number of writes completed successfully. + WritesCompleted uint64 `json:"writes_completed"` + + // Number of writes merged + // See the description of reads merged. + WritesMerged uint64 `json:"writes_merged"` + + // Number of sectors written + // This is the total number of sectors written successfully. + SectorsWritten uint64 `json:"sectors_written"` + + // Number of milliseconds spent writing + // This is the total number of milliseconds spent by all writes (as + // measured from __make_request() to end_that_request_last()). + WriteTime uint64 `json:"write_time"` + + // Number of I/Os currently in progress + // The only field that should go to zero. Incremented as requests are + // given to appropriate struct request_queue and decremented as they finish. + IoInProgress uint64 `json:"io_in_progress"` + + // Number of milliseconds spent doing I/Os + // This field increases so long as field 9 is nonzero. + IoTime uint64 `json:"io_time"` + + // weighted number of milliseconds spent doing I/Os + // This field is incremented at each I/O start, I/O completion, I/O + // merge, or read of these stats by the number of I/Os in progress + // (field 9) times the number of milliseconds spent doing I/O since the + // last update of this field. This can provide an easy measure of both + // I/O completion time and the backlog that may be accumulating. + WeightedIoTime uint64 `json:"weighted_io_time"` +} + +type ContainerStats struct { + // The time of this stat point. 
+ Timestamp time.Time `json:"timestamp"` + Cpu CpuStats `json:"cpu,omitempty"` + DiskIo DiskIoStats `json:"diskio,omitempty"` + Memory MemoryStats `json:"memory,omitempty"` + Network NetworkStats `json:"network,omitempty"` + + // Filesystem statistics + Filesystem []FsStats `json:"filesystem,omitempty"` + + // Task load stats + TaskStats LoadStats `json:"task_stats,omitempty"` + + //Custom metrics from all collectors + CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"` +} + +func timeEq(t1, t2 time.Time, tolerance time.Duration) bool { + // t1 should not be later than t2 + if t1.After(t2) { + t1, t2 = t2, t1 + } + diff := t2.Sub(t1) + if diff <= tolerance { + return true + } + return false +} + +const ( + // 10ms, i.e. 0.01s + timePrecision time.Duration = 10 * time.Millisecond +) + +// This function is useful because we do not require precise time +// representation. +func (a *ContainerStats) Eq(b *ContainerStats) bool { + if !timeEq(a.Timestamp, b.Timestamp, timePrecision) { + return false + } + return a.StatsEq(b) +} + +// Checks equality of the stats values. +func (a *ContainerStats) StatsEq(b *ContainerStats) bool { + // TODO(vmarmol): Consider using this through reflection. + if !reflect.DeepEqual(a.Cpu, b.Cpu) { + return false + } + if !reflect.DeepEqual(a.Memory, b.Memory) { + return false + } + if !reflect.DeepEqual(a.DiskIo, b.DiskIo) { + return false + } + if !reflect.DeepEqual(a.Network, b.Network) { + return false + } + if !reflect.DeepEqual(a.Filesystem, b.Filesystem) { + return false + } + return true +} + +// Event contains information general to events such as the time at which they +// occurred, their specific type, and the actual event. Event types are +// differentiated by the EventType field of Event. +type Event struct { + // the absolute container name for which the event occurred + ContainerName string `json:"container_name"` + + // the time at which the event occurred + Timestamp time.Time `json:"timestamp"` + + // the type of event. EventType is an enumerated type + EventType EventType `json:"event_type"` + + // the original event object and all of its extraneous data, ex. an + // OomInstance + EventData EventData `json:"event_data,omitempty"` +} + +// EventType is an enumerated type which lists the categories under which +// events may fall. The Event field EventType is populated by this enum. +type EventType string + +const ( + EventOom EventType = "oom" + EventOomKill = "oomKill" + EventContainerCreation = "containerCreation" + EventContainerDeletion = "containerDeletion" +) + +// Extra information about an event. Only one type will be set. +type EventData struct { + // Information about an OOM kill event. + OomKill *OomKillEventData `json:"oom,omitempty"` +} + +// Information related to an OOM kill instance +type OomKillEventData struct { + // process id of the killed process + Pid int `json:"pid"` + + // The name of the killed process + ProcessName string `json:"process_name"` +} diff --git a/vendor/github.com/google/cadvisor/info/v1/docker.go b/vendor/github.com/google/cadvisor/info/v1/docker.go new file mode 100644 index 00000000..2703c534 --- /dev/null +++ b/vendor/github.com/google/cadvisor/info/v1/docker.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Types used for docker containers. +package v1 + +type DockerStatus struct { + Version string `json:"version"` + KernelVersion string `json:"kernel_version"` + OS string `json:"os"` + Hostname string `json:"hostname"` + RootDir string `json:"root_dir"` + Driver string `json:"driver"` + DriverStatus map[string]string `json:"driver_status"` + ExecDriver string `json:"exec_driver"` + NumImages int `json:"num_images"` + NumContainers int `json:"num_containers"` +} + +type DockerImage struct { + ID string `json:"id"` + RepoTags []string `json:"repo_tags"` // repository name and tags. + Created int64 `json:"created"` // unix time since creation. + VirtualSize int64 `json:"virtual_size"` + Size int64 `json:"size"` +} diff --git a/vendor/github.com/google/cadvisor/info/v1/machine.go b/vendor/github.com/google/cadvisor/info/v1/machine.go new file mode 100644 index 00000000..74a5df49 --- /dev/null +++ b/vendor/github.com/google/cadvisor/info/v1/machine.go @@ -0,0 +1,205 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +type FsInfo struct { + // Block device associated with the filesystem. + Device string `json:"device"` + + // Total number of bytes available on the filesystem. + Capacity uint64 `json:"capacity"` + + // Type of device. + Type string `json:"type"` + + // Total number of inodes available on the filesystem. + Inodes uint64 `json:"inodes"` +} + +type Node struct { + Id int `json:"node_id"` + // Per-node memory + Memory uint64 `json:"memory"` + Cores []Core `json:"cores"` + Caches []Cache `json:"caches"` +} + +type Core struct { + Id int `json:"core_id"` + Threads []int `json:"thread_ids"` + Caches []Cache `json:"caches"` +} + +type Cache struct { + // Size of memory cache in bytes. + Size uint64 `json:"size"` + // Type of memory cache: data, instruction, or unified. + Type string `json:"type"` + // Level (distance from cpus) in a multi-level cache hierarchy. + Level int `json:"level"` +} + +func (self *Node) FindCore(id int) (bool, int) { + for i, n := range self.Cores { + if n.Id == id { + return true, i + } + } + return false, -1 +} + +func (self *Node) AddThread(thread int, core int) { + var coreIdx int + if core == -1 { + // Assume one hyperthread per core when topology data is missing. 
+ core = thread + } + ok, coreIdx := self.FindCore(core) + + if !ok { + // New core + core := Core{Id: core} + self.Cores = append(self.Cores, core) + coreIdx = len(self.Cores) - 1 + } + self.Cores[coreIdx].Threads = append(self.Cores[coreIdx].Threads, thread) +} + +func (self *Node) AddNodeCache(c Cache) { + self.Caches = append(self.Caches, c) +} + +func (self *Node) AddPerCoreCache(c Cache) { + for idx := range self.Cores { + self.Cores[idx].Caches = append(self.Cores[idx].Caches, c) + } +} + +type DiskInfo struct { + // device name + Name string `json:"name"` + + // Major number + Major uint64 `json:"major"` + + // Minor number + Minor uint64 `json:"minor"` + + // Size in bytes + Size uint64 `json:"size"` + + // I/O Scheduler - one of "none", "noop", "cfq", "deadline" + Scheduler string `json:"scheduler"` +} + +type NetInfo struct { + // Device name + Name string `json:"name"` + + // Mac Address + MacAddress string `json:"mac_address"` + + // Speed in MBits/s + Speed int64 `json:"speed"` + + // Maximum Transmission Unit + Mtu int64 `json:"mtu"` +} + +type CloudProvider string + +const ( + GCE CloudProvider = "GCE" + AWS = "AWS" + Azure = "Azure" + Baremetal = "Baremetal" + UnknownProvider = "Unknown" +) + +type InstanceType string + +const ( + NoInstance InstanceType = "None" + UnknownInstance = "Unknown" +) + +type InstanceID string + +const ( + UnNamedInstance InstanceID = "None" +) + +type MachineInfo struct { + // The number of cores in this machine. + NumCores int `json:"num_cores"` + + // Maximum clock speed for the cores, in KHz. + CpuFrequency uint64 `json:"cpu_frequency_khz"` + + // The amount of memory (in bytes) in this machine + MemoryCapacity uint64 `json:"memory_capacity"` + + // The machine id + MachineID string `json:"machine_id"` + + // The system uuid + SystemUUID string `json:"system_uuid"` + + // The boot id + BootID string `json:"boot_id"` + + // Filesystems on this machine. + Filesystems []FsInfo `json:"filesystems"` + + // Disk map + DiskMap map[string]DiskInfo `json:"disk_map"` + + // Network devices + NetworkDevices []NetInfo `json:"network_devices"` + + // Machine Topology + // Describes cpu/memory layout and hierarchy. + Topology []Node `json:"topology"` + + // Cloud provider the machine belongs to. + CloudProvider CloudProvider `json:"cloud_provider"` + + // Type of cloud instance (e.g. GCE standard) the machine is. + InstanceType InstanceType `json:"instance_type"` + + // ID of cloud instance (e.g. instance-1) given to it by the cloud provider. + InstanceID InstanceID `json:"instance_id"` +} + +type VersionInfo struct { + // Kernel version. + KernelVersion string `json:"kernel_version"` + + // OS image being used for cadvisor container, or host image if running on host directly. + ContainerOsVersion string `json:"container_os_version"` + + // Docker version. + DockerVersion string `json:"docker_version"` + + // cAdvisor version. + CadvisorVersion string `json:"cadvisor_version"` + // cAdvisor git revision. + CadvisorRevision string `json:"cadvisor_revision"` +} + +type MachineInfoFactory interface { + GetMachineInfo() (*MachineInfo, error) + GetVersionInfo() (*VersionInfo, error) +} diff --git a/vendor/github.com/google/cadvisor/info/v1/metric.go b/vendor/github.com/google/cadvisor/info/v1/metric.go new file mode 100644 index 00000000..90fd9e49 --- /dev/null +++ b/vendor/github.com/google/cadvisor/info/v1/metric.go @@ -0,0 +1,79 @@ +// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" +) + +// Type of metric being exported. +type MetricType string + +const ( + // Instantaneous value. May increase or decrease. + MetricGauge MetricType = "gauge" + + // A counter-like value that is only expected to increase. + MetricCumulative = "cumulative" + + // Rate over a time period. + MetricDelta = "delta" +) + +// DataType for metric being exported. +type DataType string + +const ( + IntType DataType = "int" + FloatType = "float" +) + +// Spec for custom metric. +type MetricSpec struct { + // The name of the metric. + Name string `json:"name"` + + // Type of the metric. + Type MetricType `json:"type"` + + // Data Type for the stats. + Format DataType `json:"format"` + + // Display Units for the stats. + Units string `json:"units"` +} + +// An exported metric. +type MetricValBasic struct { + // Time at which the metric was queried + Timestamp time.Time `json:"timestamp"` + + // The value of the metric at this point. + IntValue int64 `json:"int_value,omitempty"` + FloatValue float64 `json:"float_value,omitempty"` +} + +// An exported metric. +type MetricVal struct { + // Label associated with a metric + Label string `json:"label,omitempty"` + + // Time at which the metric was queried + Timestamp time.Time `json:"timestamp"` + + // The value of the metric at this point. + IntValue int64 `json:"int_value,omitempty"` + FloatValue float64 `json:"float_value,omitempty"` +} diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 00000000..9d91c633 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,2 @@ +language: go +install: go get -t diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 00000000..68668029 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 00000000..cdcea0f6 --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,68 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. + +![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) + +## Status + +It is ready for production use. It works fine although it may use more of testing. Here some projects in the wild using Mergo: + +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) + +[![Build Status][1]][2] +[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo) + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v1 + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
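For context on the mergo dependency vendored above, here is a minimal sketch of the default-filling usage pattern its README describes; the Config type, its field names, and the values are illustrative assumptions for this note only and are not part of the vendored package:

    package main

    import (
        "fmt"

        "github.com/imdario/mergo"
    )

    // Config is a hypothetical configuration struct used only for this sketch.
    type Config struct {
        Host string
        Port int
    }

    func main() {
        // Host is set by the caller; Port is left at its zero value.
        cfg := Config{Host: "example.local"}

        // defaults supplies a value only for Port.
        defaults := Config{Port: 8080}

        // Merge fills zero-value exported fields of cfg from defaults.
        if err := mergo.Merge(&cfg, defaults); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg) // {Host:example.local Port:8080}
    }

With the vendoring layout added in this patch, the github.com/imdario/mergo import path resolves to the copy under vendor/ when built inside this repository.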
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 00000000..6e9aa7ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 00000000..44361e88 --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,146 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. 
+ continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { + return + } + } else { + if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 00000000..5d328b1f --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,99 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "reflect" +) + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + switch dst.Kind() { + case reflect.Struct: + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil { + return + } + } + case reflect.Map: + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Map: + if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { + return + } + } + if !dstElement.IsValid() { + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } else if dst.IsNil() { + if dst.CanSet() && isEmptyValue(dst) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) { + dst.Set(src) + } + } + return +} + +// Merge sets fields' values in dst from src if they have a zero +// value of their type. +// dst and src must be valid same-type structs and dst must be +// a pointer to struct. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +func Merge(dst, src interface{}) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 00000000..f8a0991e --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,90 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 00000000..5f0d1fb6 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 00000000..7a950d17 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. 
To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 00000000..9d2d8a4b --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,15 @@ +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. +func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 00000000..336142a5 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,98 @@ +// +build windows +// +build !go1.4 + +package mousetrap + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const ( + // defined by the Win32 API + th32cs_snapprocess uintptr = 0x2 +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") + Process32First = kernel.MustFindProc("Process32FirstW") + Process32Next = kernel.MustFindProc("Process32NextW") +) + +// ProcessEntry32 structure defined by the Win32 API +type processEntry32 struct { + dwSize uint32 + cntUsage uint32 + th32ProcessID uint32 + th32DefaultHeapID int + th32ModuleID uint32 + cntThreads uint32 + th32ParentProcessID uint32 + pcPriClassBase int32 + dwFlags uint32 + szExeFile [syscall.MAX_PATH]uint16 +} + +func getProcessEntry(pid int) (pe *processEntry32, err error) { + snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) + if snapshot == uintptr(syscall.InvalidHandle) { + err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) + return + } + defer syscall.CloseHandle(syscall.Handle(snapshot)) + + var processEntry processEntry32 + processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) + ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32First: %v", e1) + return + } + + for { + if processEntry.th32ProcessID == uint32(pid) { + pe = &processEntry + return + } + + ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32Next: %v", e1) + return + } + } +} + +func getppid() (pid int, err error) { + pe, err := getProcessEntry(os.Getpid()) + if err != nil { + return + } + + pid = int(pe.th32ParentProcessID) + return +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. 
It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + ppid, err := getppid() + if err != nil { + return false + } + + pe, err := getProcessEntry(ppid) + if err != nil { + return false + } + + name := syscall.UTF16ToString(pe.szExeFile[:]) + return name == "explorer.exe" +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go new file mode 100644 index 00000000..9a28e57c --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -0,0 +1,46 @@ +// +build windows +// +build go1.4 + +package mousetrap + +import ( + "os" + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(os.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore new file mode 100644 index 00000000..010c242b --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml new file mode 100644 index 00000000..6a363c70 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - 1.3 diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE new file mode 100644 index 00000000..5c304d1a --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md new file mode 100644 index 00000000..d43a6c79 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/README.md @@ -0,0 +1,61 @@ +clockwork +========= + +[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) +[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) + +a simple fake clock for golang + +# Usage + +Replace uses of the `time` package with the `clockwork.Clock` interface instead. + +For example, instead of using `time.Sleep` directly: + +``` +func my_func() { + time.Sleep(3 * time.Second) + do_something() +} +``` + +inject a clock and use its `Sleep` method instead: + +``` +func my_func(clock clockwork.Clock) { + clock.Sleep(3 * time.Second) + do_something() +} +``` + +Now you can easily test `my_func` with a `FakeClock`: + +``` +func TestMyFunc(t *testing.T) { + c := clockwork.NewFakeClock() + + // Start our sleepy function + my_func(c) + + // Ensure we wait until my_func is sleeping + c.BlockUntil(1) + + assert_state() + + // Advance the FakeClock forward in time + c.Advance(3) + + assert_state() +} +``` + +and in production builds, simply inject the real clock instead: +``` +my_func(clockwork.NewRealClock()) +``` + +See [example_test.go](example_test.go) for a full example. + +# Credits + +clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go new file mode 100644 index 00000000..1f1045bc --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -0,0 +1,164 @@ +package clockwork + +import ( + "sync" + "time" +) + +// Clock provides an interface that packages can use instead of directly +// using the time module, so that chronology-related behavior can be tested +type Clock interface { + After(d time.Duration) <-chan time.Time + Sleep(d time.Duration) + Now() time.Time +} + +// FakeClock provides an interface for a clock which can be +// manually advanced through time +type FakeClock interface { + Clock + // Advance advances the FakeClock to a new point in time, ensuring any existing + // sleepers are notified appropriately before returning + Advance(d time.Duration) + // BlockUntil will block until the FakeClock has the given number of + // sleepers (callers of Sleep or After) + BlockUntil(n int) +} + +// NewRealClock returns a Clock which simply delegates calls to the actual time +// package; it should be used by packages in production. +func NewRealClock() Clock { + return &realClock{} +} + +// NewFakeClock returns a FakeClock implementation which can be +// manually advanced through time for testing. 
+func NewFakeClock() FakeClock { + return &fakeClock{ + l: sync.RWMutex{}, + + // use a fixture that does not fulfill Time.IsZero() + time: time.Date(1900, time.January, 1, 0, 0, 0, 0, time.UTC), + } +} + +type realClock struct{} + +func (rc *realClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (rc *realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +func (rc *realClock) Now() time.Time { + return time.Now() +} + +type fakeClock struct { + sleepers []*sleeper + blockers []*blocker + time time.Time + + l sync.RWMutex +} + +// sleeper represents a caller of After or Sleep +type sleeper struct { + until time.Time + done chan time.Time +} + +// blocker represents a caller of BlockUntil +type blocker struct { + count int + ch chan struct{} +} + +// After mimics time.After; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. +func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + fc.l.Lock() + defer fc.l.Unlock() + now := fc.time + done := make(chan time.Time, 1) + if d.Nanoseconds() == 0 { + // special case - trigger immediately + done <- now + } else { + // otherwise, add to the set of sleepers + s := &sleeper{ + until: now.Add(d), + done: done, + } + fc.sleepers = append(fc.sleepers, s) + // and notify any blockers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + } + return done +} + +// notifyBlockers notifies all the blockers waiting until the +// given number of sleepers are waiting on the fakeClock. It +// returns an updated slice of blockers (i.e. those still waiting) +func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { + for _, b := range blockers { + if b.count == count { + close(b.ch) + } else { + newBlockers = append(newBlockers, b) + } + } + return +} + +// Sleep blocks until the given duration has passed on the fakeClock +func (fc *fakeClock) Sleep(d time.Duration) { + <-fc.After(d) +} + +// Time returns the current time of the fakeClock +func (fc *fakeClock) Now() time.Time { + fc.l.Lock() + defer fc.l.Unlock() + return fc.time +} + +// Advance advances fakeClock to a new point in time, ensuring channels from any +// previous invocations of After are notified appropriately before returning +func (fc *fakeClock) Advance(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + end := fc.time.Add(d) + var newSleepers []*sleeper + for _, s := range fc.sleepers { + if end.Sub(s.until) >= 0 { + s.done <- end + } else { + newSleepers = append(newSleepers, s) + } + } + fc.sleepers = newSleepers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + fc.time = end +} + +// BlockUntil will block until the fakeClock has the given number of sleepers +// (callers of Sleep or After) +func (fc *fakeClock) BlockUntil(n int) { + fc.l.Lock() + // Fast path: current number of sleepers is what we're looking for + if len(fc.sleepers) == n { + fc.l.Unlock() + return + } + // Otherwise, set up a new blocker + b := &blocker{ + count: n, + ch: make(chan struct{}), + } + fc.blockers = append(fc.blockers, b) + fc.l.Unlock() + <-b.ch +} diff --git a/vendor/github.com/juju/ratelimit/LICENSE b/vendor/github.com/juju/ratelimit/LICENSE new file mode 100644 index 00000000..ade9307b --- /dev/null +++ b/vendor/github.com/juju/ratelimit/LICENSE @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. 
+ +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md new file mode 100644 index 00000000..a0fdfe2b --- /dev/null +++ b/vendor/github.com/juju/ratelimit/README.md @@ -0,0 +1,117 @@ +# ratelimit +-- + import "github.com/juju/ratelimit" + +The ratelimit package provides an efficient token bucket implementation. See +http://en.wikipedia.org/wiki/Token_bucket. + +## Usage + +#### func Reader + +```go +func Reader(r io.Reader, bucket *Bucket) io.Reader +``` +Reader returns a reader that is rate limited by the given token bucket. Each +token in the bucket represents one byte. + +#### func Writer + +```go +func Writer(w io.Writer, bucket *Bucket) io.Writer +``` +Writer returns a writer that is rate limited by the given token bucket. Each +token in the bucket represents one byte. + +#### type Bucket + +```go +type Bucket struct { +} +``` + +Bucket represents a token bucket that fills at a predetermined rate. Methods on +Bucket may be called concurrently. + +#### func NewBucket + +```go +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket +``` +NewBucket returns a new token bucket that fills at the rate of one token every +fillInterval, up to the given maximum capacity. Both arguments must be positive. +The bucket is initially full. 
+ +#### func NewBucketWithQuantum + +```go +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket +``` +NewBucketWithQuantum is similar to NewBucket, but allows the specification of +the quantum size - quantum tokens are added every fillInterval. + +#### func NewBucketWithRate + +```go +func NewBucketWithRate(rate float64, capacity int64) *Bucket +``` +NewBucketWithRate returns a token bucket that fills the bucket at the rate of +rate tokens per second up to the given maximum capacity. Because of limited +clock resolution, at high rates, the actual rate may be up to 1% different from +the specified rate. + +#### func (*Bucket) Rate + +```go +func (tb *Bucket) Rate() float64 +``` +Rate returns the fill rate of the bucket, in tokens per second. + +#### func (*Bucket) Take + +```go +func (tb *Bucket) Take(count int64) time.Duration +``` +Take takes count tokens from the bucket without blocking. It returns the time +that the caller should wait until the tokens are actually available. + +Note that if the request is irrevocable - there is no way to return tokens to +the bucket once this method commits us to taking them. + +#### func (*Bucket) TakeAvailable + +```go +func (tb *Bucket) TakeAvailable(count int64) int64 +``` +TakeAvailable takes up to count immediately available tokens from the bucket. It +returns the number of tokens removed, or zero if there are no available tokens. +It does not block. + +#### func (*Bucket) TakeMaxDuration + +```go +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) +``` +TakeMaxDuration is like Take, except that it will only take tokens from the +bucket if the wait time for the tokens is no greater than maxWait. + +If it would take longer than maxWait for the tokens to become available, it does +nothing and reports false, otherwise it returns the time that the caller should +wait until the tokens are actually available, and reports true. + +#### func (*Bucket) Wait + +```go +func (tb *Bucket) Wait(count int64) +``` +Wait takes count tokens from the bucket, waiting until they are available. + +#### func (*Bucket) WaitMaxDuration + +```go +func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool +``` +WaitMaxDuration is like Wait except that it will only take tokens from the +bucket if it needs to wait for no greater than maxWait. It reports whether any +tokens have been removed from the bucket If no tokens have been removed, it +returns immediately. diff --git a/vendor/github.com/juju/ratelimit/ratelimit.go b/vendor/github.com/juju/ratelimit/ratelimit.go new file mode 100644 index 00000000..3ef32fbc --- /dev/null +++ b/vendor/github.com/juju/ratelimit/ratelimit.go @@ -0,0 +1,245 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +// The ratelimit package provides an efficient token bucket implementation +// that can be used to limit the rate of arbitrary things. +// See http://en.wikipedia.org/wiki/Token_bucket. +package ratelimit + +import ( + "math" + "strconv" + "sync" + "time" +) + +// Bucket represents a token bucket that fills at a predetermined rate. +// Methods on Bucket may be called concurrently. +type Bucket struct { + startTime time.Time + capacity int64 + quantum int64 + fillInterval time.Duration + + // The mutex guards the fields following it. + mu sync.Mutex + + // avail holds the number of available tokens + // in the bucket, as of availTick ticks from startTime. 
+ // It will be negative when there are consumers + // waiting for tokens. + avail int64 + availTick int64 +} + +// NewBucket returns a new token bucket that fills at the +// rate of one token every fillInterval, up to the given +// maximum capacity. Both arguments must be +// positive. The bucket is initially full. +func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { + return NewBucketWithQuantum(fillInterval, capacity, 1) +} + +// rateMargin specifes the allowed variance of actual +// rate from specified rate. 1% seems reasonable. +const rateMargin = 0.01 + +// NewBucketWithRate returns a token bucket that fills the bucket +// at the rate of rate tokens per second up to the given +// maximum capacity. Because of limited clock resolution, +// at high rates, the actual rate may be up to 1% different from the +// specified rate. +func NewBucketWithRate(rate float64, capacity int64) *Bucket { + for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { + fillInterval := time.Duration(1e9 * float64(quantum) / rate) + if fillInterval <= 0 { + continue + } + tb := NewBucketWithQuantum(fillInterval, capacity, quantum) + if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { + return tb + } + } + panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64)) +} + +// nextQuantum returns the next quantum to try after q. +// We grow the quantum exponentially, but slowly, so we +// get a good fit in the lower numbers. +func nextQuantum(q int64) int64 { + q1 := q * 11 / 10 + if q1 == q { + q1++ + } + return q1 +} + +// NewBucketWithQuantum is similar to NewBucket, but allows +// the specification of the quantum size - quantum tokens +// are added every fillInterval. +func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { + if fillInterval <= 0 { + panic("token bucket fill interval is not > 0") + } + if capacity <= 0 { + panic("token bucket capacity is not > 0") + } + if quantum <= 0 { + panic("token bucket quantum is not > 0") + } + return &Bucket{ + startTime: time.Now(), + capacity: capacity, + quantum: quantum, + avail: capacity, + fillInterval: fillInterval, + } +} + +// Wait takes count tokens from the bucket, waiting until they are +// available. +func (tb *Bucket) Wait(count int64) { + if d := tb.Take(count); d > 0 { + time.Sleep(d) + } +} + +// WaitMaxDuration is like Wait except that it will +// only take tokens from the bucket if it needs to wait +// for no greater than maxWait. It reports whether +// any tokens have been removed from the bucket +// If no tokens have been removed, it returns immediately. +func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool { + d, ok := tb.TakeMaxDuration(count, maxWait) + if d > 0 { + time.Sleep(d) + } + return ok +} + +const infinityDuration time.Duration = 0x7fffffffffffffff + +// Take takes count tokens from the bucket without blocking. It returns +// the time that the caller should wait until the tokens are actually +// available. +// +// Note that if the request is irrevocable - there is no way to return +// tokens to the bucket once this method commits us to taking them. +func (tb *Bucket) Take(count int64) time.Duration { + d, _ := tb.take(time.Now(), count, infinityDuration) + return d +} + +// TakeMaxDuration is like Take, except that +// it will only take tokens from the bucket if the wait +// time for the tokens is no greater than maxWait. 
+// +// If it would take longer than maxWait for the tokens +// to become available, it does nothing and reports false, +// otherwise it returns the time that the caller should +// wait until the tokens are actually available, and reports +// true. +func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { + return tb.take(time.Now(), count, maxWait) +} + +// TakeAvailable takes up to count immediately available tokens from the +// bucket. It returns the number of tokens removed, or zero if there are +// no available tokens. It does not block. +func (tb *Bucket) TakeAvailable(count int64) int64 { + return tb.takeAvailable(time.Now(), count) +} + +// takeAvailable is the internal version of TakeAvailable - it takes the +// current time as an argument to enable easy testing. +func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 { + if count <= 0 { + return 0 + } + tb.mu.Lock() + defer tb.mu.Unlock() + + tb.adjust(now) + if tb.avail <= 0 { + return 0 + } + if count > tb.avail { + count = tb.avail + } + tb.avail -= count + return count +} + +// Available returns the number of available tokens. It will be negative +// when there are consumers waiting for tokens. Note that if this +// returns greater than zero, it does not guarantee that calls that take +// tokens from the buffer will succeed, as the number of available +// tokens could have changed in the meantime. This method is intended +// primarily for metrics reporting and debugging. +func (tb *Bucket) Available() int64 { + return tb.available(time.Now()) +} + +// available is the internal version of available - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) available(now time.Time) int64 { + tb.mu.Lock() + defer tb.mu.Unlock() + tb.adjust(now) + return tb.avail +} + +// Capacity returns the capacity that the bucket was created with. +func (tb *Bucket) Capacity() int64 { + return tb.capacity +} + +// Rate returns the fill rate of the bucket, in tokens per second. +func (tb *Bucket) Rate() float64 { + return 1e9 * float64(tb.quantum) / float64(tb.fillInterval) +} + +// take is the internal version of Take - it takes the current time as +// an argument to enable easy testing. +func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) { + if count <= 0 { + return 0, true + } + tb.mu.Lock() + defer tb.mu.Unlock() + + currentTick := tb.adjust(now) + avail := tb.avail - count + if avail >= 0 { + tb.avail = avail + return 0, true + } + // Round up the missing tokens to the nearest multiple + // of quantum - the tokens won't be available until + // that tick. + endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum + endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval) + waitTime := endTime.Sub(now) + if waitTime > maxWait { + return 0, false + } + tb.avail = avail + return waitTime, true +} + +// adjust adjusts the current bucket capacity based on the current time. +// It returns the current tick. 
+func (tb *Bucket) adjust(now time.Time) (currentTick int64) { + currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval) + + if tb.avail >= tb.capacity { + return + } + tb.avail += (currentTick - tb.availTick) * tb.quantum + if tb.avail > tb.capacity { + tb.avail = tb.capacity + } + tb.availTick = currentTick + return +} diff --git a/vendor/github.com/juju/ratelimit/reader.go b/vendor/github.com/juju/ratelimit/reader.go new file mode 100644 index 00000000..6403bf78 --- /dev/null +++ b/vendor/github.com/juju/ratelimit/reader.go @@ -0,0 +1,51 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3 with static-linking exception. +// See LICENCE file for details. + +package ratelimit + +import "io" + +type reader struct { + r io.Reader + bucket *Bucket +} + +// Reader returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Reader(r io.Reader, bucket *Bucket) io.Reader { + return &reader{ + r: r, + bucket: bucket, + } +} + +func (r *reader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if n <= 0 { + return n, err + } + r.bucket.Wait(int64(n)) + return n, err +} + +type writer struct { + w io.Writer + bucket *Bucket +} + +// Writer returns a reader that is rate limited by +// the given token bucket. Each token in the bucket +// represents one byte. +func Writer(w io.Writer, bucket *Bucket) io.Writer { + return &writer{ + w: w, + bucket: bucket, + } +} + +func (w *writer) Write(buf []byte) (int, error) { + w.bucket.Wait(int64(len(buf))) + return w.w.Write(buf) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 00000000..13f15dfc --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013 Matt T. Proud + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 00000000..66d9b545 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + headerBuf := make([]byte, binary.MaxVarintLen32) + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 00000000..c318385c --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. 
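To make the delimited format concrete, a small round-trip sketch: it writes a message with `WriteDelimited` (defined in encode.go just below) and reads it back with the `ReadDelimited` function above. The `MetricFamily` payload is borrowed from the vendored client_model package, purely as a convenient proto.Message:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
	}

	var buf bytes.Buffer
	// Prefix the marshalled message with its length encoded as a varint.
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		panic(err)
	}

	// Read exactly one length-delimited message back from the stream.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // http_requests_total
}
```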
+package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 00000000..4b76ea9a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + buf := make([]byte, binary.MaxVarintLen32) + encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 00000000..37e4a7d4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,28 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +goautoneg +http://bitbucket.org/ww/goautoneg +Copyright 2011, Open Knowledge Foundation Ltd. +See README.txt for license details. + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. 
+ +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 00000000..3460f034 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 00000000..81032bed --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1,53 @@ +# Overview +This is the [Prometheus](http://www.prometheus.io) telemetric +instrumentation client [Go](http://golang.org) client library. It +enable authors to define process-space metrics for their servers and +expose them through a web service interface for extraction, +aggregation, and a whole slew of other post processing techniques. + +# Installing + $ go get github.com/prometheus/client_golang/prometheus + +# Example +```go +package main + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + indexed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "my_company", + Subsystem: "indexer", + Name: "documents_indexed", + Help: "The number of documents indexed.", + }) + size = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "my_company", + Subsystem: "storage", + Name: "documents_total_size_bytes", + Help: "The total size of all documents in the storage.", + }) +) + +func main() { + http.Handle("/metrics", prometheus.Handler()) + + indexed.Inc() + size.Set(5) + + http.ListenAndServe(":8080", nil) +} + +func init() { + prometheus.MustRegister(indexed) + prometheus.MustRegister(size) +} +``` + +# Documentation + +[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 00000000..c0468800 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. 
+// +// The stock metrics provided by this package (like Gauge, Counter, Summary) are +// also Collectors (which only ever collect one metric, namely itself). An +// implementer of Collector may, however, collect multiple metrics in a +// coordinated fashion and/or create metrics on the fly. Examples for collectors +// already implemented in this library are the metric vectors (i.e. collection +// of multiple instances of the same Metric but with different label values) +// like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by Prometheus when collecting metrics. The + // implementation sends each collected metric via the provided channel + // and returns once the last metric has been sent. The descriptor of + // each sent metric is one of those returned by Describe. Returned + // metrics that share the same descriptor must differ in their variable + // label values. This method may be called concurrently and must + // therefore be implemented in a concurrency safe way. Blocking occurs + // at the expense of total performance of rendering all registered + // metrics. Ideally, Collector implementations support concurrent + // readers. + Collect(chan<- Metric) +} + +// SelfCollector implements Collector for a single Metric so that that the +// Metric collects itself. Add it as an anonymous field to a struct that +// implements Metric, and call Init with the Metric itself as an argument. +type SelfCollector struct { + self Metric +} + +// Init provides the SelfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *SelfCollector) Init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *SelfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *SelfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 00000000..a2952d1c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,175 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "hash/fnv" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + Set(float64) + // Inc increments the counter by 1. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.Init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.Init(result) // Init self-collection. + return result + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. 
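For reviewers unfamiliar with the vectored metrics, a short sketch of the `CounterVec` described above, partitioned by response code and method as in its doc comment; the metric name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Number of HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func main() {
	prometheus.MustRegister(httpReqs)

	// WithLabelValues panics on a label mismatch; the GetMetricWith* variants return an error instead.
	httpReqs.WithLabelValues("404", "GET").Inc()
	httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)

	fmt.Println("counters registered and incremented")
}
```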
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 00000000..fcde784d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,201 @@ +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "hash/fnv" + "regexp" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. 
It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occured during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... + for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. 
+ for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + h := fnv.New64a() + var b bytes.Buffer // To copy string contents into, avoiding []byte allocations. + for _, val := range labelValues { + b.Reset() + b.WriteString(val) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) + } + d.id = h.Sum64() + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + h.Reset() + b.Reset() + b.WriteString(help) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) + for _, labelName := range labelNames { + b.Reset() + b.WriteString(labelName) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) + } + d.dimHash = h.Sum64() + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 00000000..425fe879 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
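As a sketch of the `Desc` API defined above (mostly relevant to custom Collectors), this constructs a descriptor with one variable and one constant label and prints it via the `String` method; all names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label ("handler") and one constant label pair ("version").
	desc := prometheus.NewDesc(
		"myapp_http_request_duration_seconds",
		"Duration of HTTP requests handled by myapp.",
		[]string{"handler"},
		prometheus.Labels{"version": "1.2.3"},
	)

	// Any construction error is carried inside the Desc and only surfaces at registration time.
	fmt.Println(desc)
}
```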
+ +// Package prometheus provides embeddable metric primitives for servers and +// standardized exposition of telemetry through a web services interface. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// To expose metrics registered with the Prometheus registry, an HTTP server +// needs to know about the Prometheus handler. The usual endpoint is "/metrics". +// +// http.Handle("/metrics", prometheus.Handler()) +// +// As a starting point a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }) +// ) +// +// func init() { +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.Inc() +// +// http.Handle("/metrics", prometheus.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter. +// It also exports some stats about the HTTP usage of the /metrics +// endpoint. (See the Handler function for more detail.) +// +// Two more advanced metric types are the Summary and Histogram. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// and HistogramVec. +// +// Those are all the parts needed for basic usage. Detailed documentation and +// examples are provided below. +// +// Everything else this package offers is essentially for "power users" only. A +// few pointers to "power user features": +// +// All the various ...Opts structs have a ConstLabels field for labels that +// never change their value (which is only useful under special circumstances, +// see documentation of the Opts type). +// +// The Untyped metric behaves like a Gauge, but signals the Prometheus server +// not to assume anything about its type. +// +// Functions to fine-tune how the metric registry works: EnableCollectChecks, +// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// +// For custom metric collection, there are two entry points: Custom Metric +// implementations and custom Collector implementations. A Metric is the +// fundamental unit in the Prometheus data model: a sample at a point in time +// together with its meta-data (like its fully-qualified name and any number of +// pairs of label name and label value) that knows how to marshal itself into a +// data transfer object (aka DTO, implemented as a protocol buffer). A Collector +// gets registered with the Prometheus registry and manages the collection of +// one or more Metrics. Many parts of this package are building blocks for +// Metrics and Collectors. Desc is the metric descriptor, actually used by all +// metrics under the hood, and by Collectors to describe the Metrics to be +// collected, but only to be dealt with by users if they implement their own +// Metrics or Collectors. To create a Desc, the BuildFQName function will come +// in handy. 
Other useful components for Metric and Collector implementation +// include: LabelPairSorter to sort the DTO version of label pairs, +// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at +// collection time, MetricVec to bundle custom Metrics into a metric vector +// Collector, SelfCollector to make a custom Metric collect itself. +// +// A good example for a custom Collector is the ExpVarCollector included in this +// package, which exports variables exported via the "expvar" package as +// Prometheus metrics. +package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go new file mode 100644 index 00000000..0f7630d5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +// ExpvarCollector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the ExpvarCollector is inherently +// slow. Thus, the ExpvarCollector is probably great for experiments and +// prototying, but you should seriously consider a more direct implementation of +// Prometheus metrics for monitoring production systems. +// +// Use NewExpvarCollector to create new instances. +type ExpvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated ExpvarCollector that still has +// to be registered with the Prometheus registry. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. 
The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { + return &ExpvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *ExpvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *ExpvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 00000000..ba8a402c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "hash/fnv" + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. 
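A sketch of wiring the `ExpvarCollector` above to an existing expvar variable, using a descriptor with no variable labels so the plain numeric value is exported as-is; the variable and metric names are illustrative:

```go
package main

import (
	"expvar"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An existing expvar metric, e.g. maintained elsewhere in the program.
	hits := expvar.NewInt("page_hits")
	hits.Add(7)

	// Map the expvar key to a Desc; with no variable labels the value must be a number or bool.
	prometheus.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"page_hits": prometheus.NewDesc("expvar_page_hits", "Page hits proxied from expvar.", nil, nil),
	}))

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```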
+func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. +func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
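The `GaugeFunc` described above (its constructor `NewGaugeFunc` is defined just below) suits values that are cheap to read on demand; a sketch that exports the length of a hypothetical application-owned queue:

```go
package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

// jobQueue stands in for some application-owned, concurrency-safe state (hypothetical).
var (
	mu       sync.Mutex
	jobQueue []string
)

func main() {
	// The callback runs at collect time, possibly concurrently, so it must be concurrency-safe.
	queueLen := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "jobqueue_length",
			Help: "Current number of jobs waiting in the in-memory queue.",
		},
		func() float64 {
			mu.Lock()
			defer mu.Unlock()
			return float64(len(jobQueue))
		},
	)
	prometheus.MustRegister(queueLen)
	// Exposing the value then only needs the /metrics handler shown in the README above.
}
```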
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 00000000..85fa20be --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,50 @@ +package prometheus + +import ( + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutines Gauge + gcDesc *Desc +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() *goCollector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Name: "go_goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + } +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() + ch <- c.gcDesc +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 00000000..f98a41bc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,450 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "hash/fnv" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). 
However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +var ( + // DefBuckets are the default Histogram buckets. The default buckets are + // tailored to broadly measure the response time (in seconds) of a + // network service. Most likely, however, you will be required to define + // buckets customized to your use case. + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. 
+ // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.Init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + SelfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+ // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 00000000..eabe6024 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,361 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "io" + "net" + "net/http" + "strconv" + "strings" + "time" +) + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler but provides more +// flexibility (at the cost of a more complex call syntax). As +// InstrumentHandler, this function registers four metric collectors, but it +// uses the provided SummaryOpts to create them. However, the fields "Name" and +// "Help" in the SummaryOpts are ignored. "Name" is replaced by +// "requests_total", "request_duration_microseconds", "request_size_bytes", and +// "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides +// more flexibility (at the cost of a more complex call syntax). See +// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. 
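+//
+// A usage sketch (the handler name "api" and the endpoint are illustrative):
+//
+//	http.Handle("/api", prometheus.InstrumentHandlerFuncWithOpts(
+//		prometheus.SummaryOpts{
+//			Subsystem:   "http",
+//			ConstLabels: prometheus.Labels{"handler": "api"},
+//		},
+//		func(w http.ResponseWriter, r *http.Request) {
+//			w.Write([]byte("ok"))
+//		},
+//	))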
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
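+	// A ContentLength of -1 means the length of the body is unknown, in
+	// which case the body is simply not counted towards the approximate
+	// request size.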
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 00000000..86fd81c1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, +// Untyped, and Summary. Users can implement their own Metric types, but that +// should be rarely needed. See the example for SelfCollector, which is also an +// example for a user-implemented Metric. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Implementers of custom Metric types must observe concurrency safety + // as reads of this metric may occur at any time, and any blocking + // occurs at the expense of total performance of rendering all + // registered metrics. Ideally Metric implementations should support + // concurrent readers. + // + // The Prometheus client library attempts to minimize memory allocations + // and will provide a pre-existing reset dto.Metric pointer. Prometheus + // may recycle the dto.Metric proto message, so Metric implementations + // should just populate the provided dto.Metric and then should not keep + // any reference to it. + // + // While populating dto.Metric, labels must be sorted lexicographically. + // (Implementers may find LabelPairSorter useful for that.) + Write(*dto.Metric) error +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. 
One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 00000000..d8cf0eda --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,142 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) *processCollector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) *processCollector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. 
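+//
+// processCollect gathers process metrics from procfs for the pid returned by
+// pidFn; metrics whose underlying reads fail are silently skipped for this
+// scrape (see the TODO above).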
+func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + + if startTime, err := stat.StartTime(); err == nil { + c.startTime.Set(startTime) + ch <- c.startTime + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + c.openFDs.Set(float64(fds)) + ch <- c.openFDs + } + + if limits, err := p.NewLimits(); err == nil { + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go new file mode 100644 index 00000000..1c33848a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push.go @@ -0,0 +1,65 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +// Push triggers a metric collection by the default registry and pushes all +// collected metrics to the Pushgateway specified by addr. See the Pushgateway +// documentation for detailed implications of the job and instance +// parameter. instance can be left empty. You can use just host:port or ip:port +// as url, in which case 'http://' is added automatically. You can also include +// the schema in the URL. However, do not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and instance will +// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' +// to push to the Pushgateway.) +func Push(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "PUT") +} + +// PushAdd works like Push, but only previously pushed metrics with the same +// name (and the same job and instance) will be replaced. (It uses HTTP method +// 'POST' to push to the Pushgateway.) +func PushAdd(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "POST") +} + +// PushCollectors works like Push, but it does not collect from the default +// registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. +func PushCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "PUT", collectors...) +} + +// PushAddCollectors works like PushAdd, but it does not collect from the +// default registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. 
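+//
+// A minimal sketch (the job name, Pushgateway address, and counter below are
+// illustrative):
+//
+//	completions := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "example_batch_completions_total",
+//		Help: "Total number of completed example batch runs.",
+//	})
+//	completions.Inc()
+//	if err := prometheus.PushAddCollectors(
+//		"example_batch", "", "pushgateway.example.org:9091", completions,
+//	); err != nil {
+//		log.Println("could not push to Pushgateway:", err)
+//	}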
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "POST", collectors...) +} + +func pushCollectors(job, instance, url, method string, collectors ...Collector) error { + r := newRegistry() + for _, collector := range collectors { + if _, err := r.Register(collector); err != nil { + return err + } + } + return r.Push(job, instance, url, method) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 00000000..5970aaee --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,726 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "hash/fnv" + "io" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +var ( + defRegistry = newDefaultRegistry() + errAlreadyReg = errors.New("duplicate metrics collector registration attempted") +) + +// Constants relevant to the HTTP interface. +const ( + // APIVersion is the version of the format of the exported data. This + // will match this library's version, which subscribes to the Semantic + // Versioning scheme. + APIVersion = "0.0.4" + + // DelimitedTelemetryContentType is the content type set on telemetry + // data responses in delimited protobuf format. + DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` + // TextTelemetryContentType is the content type set on telemetry data + // responses in text format. + TextTelemetryContentType = `text/plain; version=` + APIVersion + // ProtoTextTelemetryContentType is the content type set on telemetry + // data responses in protobuf text format. (Only used for debugging.) + ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` + // ProtoCompactTextTelemetryContentType is the content type set on + // telemetry data responses in protobuf compact text format. (Only used + // for debugging.) + ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` + + // Constants for object pools. + numBufs = 4 + numMetricFamilies = 1000 + numMetrics = 10000 + + // Capacity for the channel to collect metrics and descriptors. 
+ capMetricChan = 1000 + capDescChan = 10 + + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + + acceptEncodingHeader = "Accept-Encoding" + acceptHeader = "Accept" +) + +// Handler returns the HTTP handler for the global Prometheus registry. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). Usually the handler is used to handle the "/metrics" endpoint. +func Handler() http.Handler { + return InstrumentHandler("prometheus", defRegistry) +} + +// UninstrumentedHandler works in the same way as Handler, but the returned HTTP +// handler is not instrumented. This is useful if no instrumentation is desired +// (for whatever reason) or if the instrumentation has to happen with a +// different handler name (or with a different instrumentation approach +// altogether). See the InstrumentHandler example. +func UninstrumentedHandler() http.Handler { + return defRegistry +} + +// Register registers a new Collector to be included in metrics collection. It +// returns an error if the descriptors provided by the Collector are invalid or +// if they - in combination with descriptors of already registered Collectors - +// do not fulfill the consistency and uniqueness criteria described in the Desc +// documentation. +// +// Do not register the same Collector multiple times concurrently. (Registering +// the same Collector twice would result in an error anyway, but on top of that, +// it is not safe to do so concurrently.) +func Register(m Collector) error { + _, err := defRegistry.Register(m) + return err +} + +// MustRegister works like Register but panics where Register would have +// returned an error. +func MustRegister(m Collector) { + err := Register(m) + if err != nil { + panic(err) + } +} + +// RegisterOrGet works like Register but does not return an error if a Collector +// is registered that equals a previously registered Collector. (Two Collectors +// are considered equal if their Describe method yields the same set of +// descriptors.) Instead, the previously registered Collector is returned (which +// is helpful if the new and previously registered Collectors are equal but not +// identical, i.e. not pointers to the same object). +// +// As for Register, it is still not safe to call RegisterOrGet with the same +// Collector multiple times concurrently. +func RegisterOrGet(m Collector) (Collector, error) { + return defRegistry.RegisterOrGet(m) +} + +// MustRegisterOrGet works like Register but panics where RegisterOrGet would +// have returned an error. +func MustRegisterOrGet(m Collector) Collector { + existing, err := RegisterOrGet(m) + if err != nil { + panic(err) + } + return existing +} + +// Unregister unregisters the Collector that equals the Collector passed in as +// an argument. (Two Collectors are considered equal if their Describe method +// yields the same set of descriptors.) The function returns whether a Collector +// was unregistered. +func Unregister(c Collector) bool { + return defRegistry.Unregister(c) +} + +// SetMetricFamilyInjectionHook sets a function that is called whenever metrics +// are collected. The hook function must be set before metrics collection begins +// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The +// MetricFamily protobufs returned by the hook function are merged with the +// metrics collected in the usual way. 
+// +// This is a way to directly inject MetricFamily protobufs managed and owned by +// the caller. The caller has full responsibility. As no registration of the +// injected metrics has happened, there is no descriptor to check against, and +// there are no registration-time checks. If collect-time checks are disabled +// (see function EnableCollectChecks), no sanity checks are performed on the +// returned protobufs at all. If collect-checks are enabled, type and uniqueness +// checks are performed, but no further consistency checks (which would require +// knowledge of a metric descriptor). +// +// Sorting concerns: The caller is responsible for sorting the label pairs in +// each metric. However, the order of metrics will be sorted by the registry as +// it is required anyway after merging with the metric families collected +// conventionally. +// +// The function must be callable at any time and concurrently. +func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + defRegistry.metricFamilyInjectionHook = hook +} + +// PanicOnCollectError sets the behavior whether a panic is caused upon an error +// while metrics are collected and served to the HTTP endpoint. By default, an +// internal server error (status code 500) is served with an error message. +func PanicOnCollectError(b bool) { + defRegistry.panicOnCollectError = b +} + +// EnableCollectChecks enables (or disables) additional consistency checks +// during metrics collection. These additional checks are not enabled by default +// because they inflict a performance penalty and the errors they check for can +// only happen if the used Metric and Collector types have internal programming +// errors. It can be helpful to enable these checks while working with custom +// Collectors or Metrics whose correctness is not well established yet. +func EnableCollectChecks(b bool) { + defRegistry.collectChecksEnabled = b +} + +// encoder is a function that writes a dto.MetricFamily to an io.Writer in a +// certain encoding. It returns the number of bytes written and any error +// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText +// are encoders. +type encoder func(io.Writer, *dto.MetricFamily) (int, error) + +type registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + bufPool chan *bytes.Buffer + metricFamilyPool chan *dto.MetricFamily + metricPool chan *dto.Metric + metricFamilyInjectionHook func() []*dto.MetricFamily + + panicOnCollectError, collectChecksEnabled bool +} + +func (r *registry) Register(c Collector) (Collector, error) { + descChan := make(chan *Desc, capDescChan) + go func() { + c.Describe(descChan) + close(descChan) + }() + + newDescIDs := map[uint64]struct{}{} + newDimHashesByName := map[string]uint64{} + var collectorID uint64 // Just a sum of all desc IDs. + var duplicateDescErr error + + r.mtx.Lock() + defer r.mtx.Unlock() + // Coduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) 
+ if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return nil, errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return existing, errAlreadyReg + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return nil, duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return c, nil +} + +func (r *registry) RegisterOrGet(m Collector) (Collector, error) { + existing, err := r.Register(m) + if err != nil && err != errAlreadyReg { + return nil, err + } + return existing, nil +} + +func (r *registry) Unregister(c Collector) bool { + descChan := make(chan *Desc, capDescChan) + go func() { + c.Describe(descChan) + close(descChan) + }() + + descIDs := map[uint64]struct{}{} + var collectorID uint64 // Just a sum of the desc IDs. + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. 
+ return true +} + +func (r *registry) Push(job, instance, pushURL, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) + if instance != "" { + pushURL += "/instances/" + url.QueryEscape(instance) + } + buf := r.getBuf() + defer r.giveBuf(buf) + if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { + if r.panicOnCollectError { + panic(err) + } + return err + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) + } + return nil +} + +func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { + contentType := expfmt.Negotiate(req.Header) + buf := r.getBuf() + defer r.giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { + if r.panicOnCollectError { + panic(err) + } + http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) +} + +func (r *registry) writePB(encoder expfmt.Encoder) error { + var metricHashes map[uint64]struct{} + if r.collectChecksEnabled { + metricHashes = make(map[uint64]struct{}) + } + metricChan := make(chan Metric, capMetricChan) + wg := sync.WaitGroup{} + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + metricFamily, ok := metricFamiliesByName[desc.fqName] + if !ok { + metricFamily = r.getMetricFamily() + defer r.giveMetricFamily(metricFamily) + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + metricFamiliesByName[desc.fqName] = metricFamily + } + dtoMetric := r.getMetric() + defer r.giveMetric(dtoMetric) + if err := metric.Write(dtoMetric); err != nil { + // TODO: Consider different means of error reporting so + // that a single erroneous metric could be skipped + // instead of blowing up the whole collection. + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + switch { + case metricFamily.Type != nil: + // Type already set. We are good. 
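+		// Otherwise, infer the metric family's type from whichever field of
+		// the collected dto.Metric is set.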
+ case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if r.collectChecksEnabled { + if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + + if r.metricFamilyInjectionHook != nil { + for _, mf := range r.metricFamilyInjectionHook() { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if !exists { + metricFamiliesByName[mf.GetName()] = mf + if r.collectChecksEnabled { + for _, m := range mf.Metric { + if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { + return err + } + } + } + continue + } + for _, m := range mf.Metric { + if r.collectChecksEnabled { + if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { + return err + } + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + + // Now that MetricFamilies are all set, sort their Metrics + // lexicographically by their label values. + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + + // Write out MetricFamilies sorted by their name. + names := make([]string, 0, len(metricFamiliesByName)) + for name := range metricFamiliesByName { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + if err := encoder.Encode(metricFamiliesByName[name]); err != nil { + return err + } + } + return nil +} + +func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { + + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? + h := fnv.New64a() + var buf bytes.Buffer + buf.WriteString(metricFamily.GetName()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. Label pairs must be sorted by contract. But the point of this + // method is to check for contract violations. So we better do the sort + // now. 
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + buf.Reset() + buf.WriteString(lp.GetValue()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) + } + metricHash := h.Sum64() + if _, exists := metricHashes[metricHash]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + metricHashes[metricHash] = struct{}{} + + if desc == nil { + return nil // Nothing left to check if we have no desc. + } + + // Desc consistency with metric family. + if metricFamily.GetName() != desc.fqName { + return fmt.Errorf( + "collected metric %s %s has name %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, + ) + } + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + + r.mtx.RLock() // Remaining checks need the read lock. + defer r.mtx.RUnlock() + + // Is the desc registered? 
+ if _, exist := r.descIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + + return nil +} + +func (r *registry) getBuf() *bytes.Buffer { + select { + case buf := <-r.bufPool: + return buf + default: + return &bytes.Buffer{} + } +} + +func (r *registry) giveBuf(buf *bytes.Buffer) { + buf.Reset() + select { + case r.bufPool <- buf: + default: + } +} + +func (r *registry) getMetricFamily() *dto.MetricFamily { + select { + case mf := <-r.metricFamilyPool: + return mf + default: + return &dto.MetricFamily{} + } +} + +func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { + mf.Reset() + select { + case r.metricFamilyPool <- mf: + default: + } +} + +func (r *registry) getMetric() *dto.Metric { + select { + case m := <-r.metricPool: + return m + default: + return &dto.Metric{} + } +} + +func (r *registry) giveMetric(m *dto.Metric) { + m.Reset() + select { + case r.metricPool <- m: + default: + } +} + +func newRegistry() *registry { + return ®istry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + bufPool: make(chan *bytes.Buffer, numBufs), + metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), + metricPool: make(chan *dto.Metric, numMetrics), + } +} + +func newDefaultRegistry() *registry { + r := newRegistry() + r.Register(NewProcessCollector(os.Getpid(), "")) + r.Register(NewGoCollector()) + return r +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 00000000..fe81e004 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,540 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "hash/fnv" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +var ( + // DefObjectives are the default Summary quantile values. + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. 
ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/beorn7/perks/quantile").
+ BufCap uint32
+}
+
+// TODO: The sliding-window decay algorithm is problematic: the Merge method
+// of perks/quantile is not working as advertised and might be unfixable, as
+// the underlying algorithm is apparently not capable of merging summaries in
+// the first place. To avoid using Merge, we currently add each observation
+// to _every_ age bucket, i.e. the effort to add a sample is essentially
+// multiplied by the number of age buckets. When rotating age buckets, we
+// empty the previous head stream. At scrape time, we simply take the
+// quantiles from the head stream (no merging required). Result: more effort
+// at observation time, less effort at scrape time, which is exactly the
+// opposite of what we set out to accomplish, but at least the results are
+// correct.
+//
+// The quite elegant previous contraption that merged the age buckets
+// efficiently at scrape time (see the code up to commit
+// 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
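As an illustration of these options, the following is a minimal, self-contained sketch (an editor's example, not part of the vendored file) that builds a Summary with explicit Objectives, MaxAge, and AgeBuckets and records one observation. The metric name and observed value are made up; the import path matches the vendored location of this package.

// Editor's sketch, not part of the vendored source.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "http_request_duration_seconds", // hypothetical metric name
		Help:       "HTTP request latency in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     10 * time.Minute, // observations older than this stop influencing the quantiles
		AgeBuckets: 5,
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042) // record a single 42ms request
}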
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if len(opts.Objectives) == 0 { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.Init(s) // Init self-collection. + return s +} + +type summary struct { + SelfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. 
+func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 00000000..c65ab1c5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,145 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "hash/fnv" + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + hash: fnv.New64a(), + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 00000000..b54ac11e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,234 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits containst the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + SelfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. +func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.Init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + SelfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.Init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. 
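To make the intended use concrete, here is a short sketch (an editor's example, not part of the vendored file) of a custom Collector that emits a throw-away const metric on every scrape, as described in the comment above. The Desc, queue label, and depth reading are hypothetical.

// Editor's sketch, not part of the vendored source.
package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	depthDesc *prometheus.Desc
}

// Describe sends the single descriptor this collector uses.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

// Collect reads the instantaneous value and wraps it in a const metric that
// is rebuilt on every scrape rather than kept around between scrapes.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, 17, "ingest")
}

func main() {
	c := &queueCollector{
		depthDesc: prometheus.NewDesc(
			"work_queue_depth", // hypothetical fully-qualified metric name
			"Current depth of the work queue.",
			[]string{"queue"}, // one variable label
			nil,               // no const labels
		),
	}
	prometheus.MustRegister(c)
}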
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 00000000..a1f3bdf3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,247 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "hash" + "sync" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects not only children, but also hash and buf. 
+ children map[uint64]Metric + desc *Desc + + // hash is our own hash instance to avoid repeated allocations. + hash hash.Hash64 + // buf is used to copy string contents into it for hashing, + // again to avoid allocations. + buf bytes.Buffer + + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metric := range m.children { + ch <- metric + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + lvs := make([]string, len(labels)) + for i, label := range m.desc.variableLabels { + lvs[i] = labels[label] + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. 
The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Reset deletes all metrics in this vector. 
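For illustration, a brief sketch (an editor's example, not part of the vendored file) of how children of a vector metric are created on first use, deleted either by label values or by a Labels map, and cleared with Reset. Metric and label names are made up.

// Editor's sketch, not part of the vendored source.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "rpc_duration_seconds", // hypothetical metric name
			Help: "RPC latency partitioned by method and code.",
		},
		[]string{"method", "code"},
	)
	prometheus.MustRegister(latency)

	// Creates the ("GET", "200") child on first access and records one value.
	latency.WithLabelValues("GET", "200").Observe(0.031)

	// Either call removes that child; the second returns false here because
	// the first call already deleted it.
	_ = latency.DeleteLabelValues("GET", "200")
	_ = latency.Delete(prometheus.Labels{"method": "GET", "code": "200"})

	// Reset drops every remaining child at once.
	latency.Reset()
}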
+func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, val := range vals { + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { + metric, ok := m.children[hash] + if !ok { + // Copy labelValues. Otherwise, they would be allocated even if we don't go + // down this code path. + copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) + metric = m.newMetric(copiedLabelValues...) + m.children[hash] = metric + } + return metric +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 00000000..20110e41 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 00000000..b065f868 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile 
`protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + 
return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 00000000..636a2c1a --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 00000000..487fdc6c --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,412 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. +func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. 
+ fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +func (sd *SampleDecoder) Decode(s *model.Vector) error { + if err := sd.Dec.Decode(&sd.f); err != nil { + return err + } + *s = extractSamples(&sd.f, sd.Opts) + return nil +} + +// Extract samples builds a slice of samples from the provided metric families. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { + var all model.Vector + for _, f := range fams { + all = append(all, extractSamples(f, o)...) + } + return all +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f) + case dto.MetricType_GAUGE: + return extractGauge(o, f) + case dto.MetricType_SUMMARY: + return extractSummary(o, f) + case dto.MetricType_UNTYPED: + return extractUntyped(o, f) + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f) + } + panic("expfmt.extractSamples: unknown metric family type") +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f 
*dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". + lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 00000000..11839ed6 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
+func NewEncoder(w io.Writer, format Format) Encoder {
+	switch format {
+	case FmtProtoDelim:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := pbutil.WriteDelimited(w, v)
+			return err
+		})
+	case FmtProtoCompact:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, v.String())
+			return err
+		})
+	case FmtProtoText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+			return err
+		})
+	case FmtText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := MetricFamilyToText(w, v)
+			return err
+		})
+	}
+	panic("expfmt.NewEncoder: unknown format")
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000..366fbde9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A package for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+	TextVersion = "0.0.4"
+
+	ProtoType = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+	// The Content-Type values for the different wire protocols.
+	FmtUnknown Format = ``
+	FmtText Format = `text/plain; version=` + TextVersion
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+	// fmtJSON2 is hidden as it is deprecated.
+	fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+	hdrContentType = "Content-Type"
+	hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000..dc2eedee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 00000000..1e060879 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. This function does not perform checks on the +// content of the metric and label names, i.e. invalid metric or label names +// will result in invalid text format output. +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. + if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 00000000..bd170b16 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,753 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// nil value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. 
+ } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. 
+func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All +// other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 00000000..7723656d --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 00000000..648b38cb --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 00000000..35e739c7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
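// Usage sketch (illustrative only, not part of the vendored sources above): how the
// goautoneg package just added is typically driven for HTTP content negotiation.
// The import uses the upstream path from the package README; the copy vendored here
// lives under an internal/ directory and is importable only from within
// github.com/prometheus/common itself.
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	// An Accept header like the one a Prometheus server might send when scraping.
	header := "application/vnd.google.protobuf;q=0.7, text/plain;q=0.3, */*;q=0.1"

	// ParseAccept returns the clauses sorted by descending quality value.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s (q=%v)\n", clause.Type, clause.SubType, clause.Q)
	}

	// Negotiate picks the best match among the media types we can actually produce.
	chosen := goautoneg.Negotiate(header, []string{"text/plain", "application/json"})
	fmt.Println("negotiated:", chosen) // "text/plain"
}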
+ +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. 
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 00000000..fc4de410 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 00000000..038fc1c9 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
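// Usage sketch (illustrative only, not vendored code): how the Alert and Fingerprint
// types defined in alert.go and fingerprinting.go above are typically used. All
// identifiers come from the vendored package github.com/prometheus/common/model.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above threshold"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}

	// Validate checks label syntax and the required start time.
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}

	// A zero EndsAt means the activity interval has not ended, so the alert is firing.
	fmt.Println(a.Status())      // "firing"
	fmt.Println(a.Fingerprint()) // 64-bit FNV-1a hash of the label set, printed as hex
}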
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 00000000..3b72e7ff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,206 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. 
+ TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelNameRE.MatchString(s) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelNameRE.MatchString(s) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. 
+type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 00000000..5f931cdb --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. 
+func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !LabelNameRE.MatchString(string(ln)) { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 00000000..a5da59a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
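// Usage sketch (illustrative only, not vendored code): the LabelSet helpers from
// labelset.go above, namely Merge, Before, and Fingerprint, using the vendored
// package github.com/prometheus/common/model.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "node", "instance": "host-1:9100"}
	extra := model.LabelSet{"instance": "host-2:9100", "env": "prod"}

	// Merge is non-destructive; on conflicts the argument's values win.
	merged := base.Merge(extra)
	fmt.Println(merged) // {env="prod", instance="host-2:9100", job="node"}

	// Before defines a stable order: fewer labels sort first, then the first
	// differing label name/value decides.
	fmt.Println(base.Before(merged)) // true (2 labels < 3 labels)

	// Fingerprint hashes the sorted name/value pairs into a 64-bit value.
	fmt.Println(merged.Fingerprint())
}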
+func (m Metric) Clone() Metric { + clone := Metric{} + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 00000000..a7b96917 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 00000000..8762b13c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). 
+const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. +func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 00000000..7538e299 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. +type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definiton +// in the Prometheus eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. 
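// Usage sketch (illustrative only, not vendored code): the signature helpers from
// signature.go above. Both functions come from github.com/prometheus/common/model;
// the label names and values here are made up for illustration.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"__name__": "http_requests_total", "method": "GET"}

	// LabelsToSignature sorts the names before hashing, so the result is
	// independent of map iteration order.
	fmt.Println(model.LabelsToSignature(labels))

	// SignatureForLabels restricts the hash to the given label names only.
	m := model.Metric{"__name__": "http_requests_total", "method": "GET", "code": "200"}
	fmt.Println(model.SignatureForLabels(m, "method"))
}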
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 00000000..548968ae --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,249 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. 
+func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// StringToDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. 
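// Usage sketch (illustrative only, not vendored code): the millisecond-resolution
// model.Time and the single-unit model.Duration format from time.go above, using
// the vendored package github.com/prometheus/common/model.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// model.Time counts milliseconds since the Unix epoch.
	now := model.Now()
	later := now.Add(90 * time.Second)
	fmt.Println(later.Sub(now)) // 1m30s

	// ParseDuration accepts exactly one unit: y, w, d, h, m, s or ms.
	d, err := model.ParseDuration("15m")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(d)) // 15m0s
	fmt.Println(d)                // "15m", via Duration.String
}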
+func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 00000000..dbf5d10e --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,403 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. 
+type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// sematics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + if s.Value.Equal(o.Value) { + return false + } + + return true +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml new file mode 100644 index 00000000..2b4554da --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.travis.yml @@ -0,0 +1,5 @@ +sudo: false +language: go +go: + - 1.5 + - 1.6 diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md new file mode 100644 index 00000000..0c802dd8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/AUTHORS.md @@ -0,0 +1,20 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Tobias Schmidt + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Armen Baghumian +* Bjoern Rabenstein +* David Cournapeau +* Ji-Hoon, Seol +* Jonas Große Sundrup +* Julius Volz +* Matthias Rampke +* Nicky Gerritsen +* Rémi Audebert +* Tobias Schmidt diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 00000000..5705f0fb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 00000000..c264a49d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,6 @@ +ci: + ! gofmt -l *.go | read nothing + go vet + go test -v ./... + go get github.com/golang/lint/golint + golint *.go diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 00000000..53c5e9aa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 00000000..6e7ee6b8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,10 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 00000000..e2acd6d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 00000000..49aaab05 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,33 @@ +package procfs + +import ( + "fmt" + "os" + "path" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. 
+type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 00000000..e7012f73 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,224 @@ +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The transport protocol (TCP, UDP). + Proto string + // The remote (real) IP address. + RemoteAddress net.IP + // The remote (real) port. + RemotePort uint16 + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
+func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(string(scanner.Text())) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + tmp := strings.SplitN(s, ":", 2) + + if len(tmp) != 2 { + return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) + } + + if len(tmp[0]) != 8 && len(tmp[0]) != 32 { + return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + } + + ip, err := hex.DecodeString(tmp[0]) + if err != nil { + return nil, 0, err 
+ } + + port, err := strconv.ParseUint(tmp[1], 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 00000000..d7a248c0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,138 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. +func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 00000000..0d0a6a90 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,212 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. 
+func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 00000000..b4e31d7b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,55 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. 
+ SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + if err != nil { + return pio, err + } + + return pio, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 00000000..2df997ce --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,137 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int + // Maximum size of files that the process may create. + FileSize int + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int + // Maximum size of the process stack in bytes. + StackSize int + // Maximum size of a core file. + CoreFileSize int + // Limit of the process's resident set in pages. + ResidentSet int + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return int(i), nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 00000000..724e271b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. 
+ Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. 
+func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 00000000..1ca217e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,56 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { + continue + } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil + } + if err := s.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 00000000..36d1a84d --- /dev/null +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 00000000..94ec5306 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 00000000..e11869ba --- /dev/null +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.3.3 + - 1.4.2 + - 1.5.1 + - tip +before_install: + - mkdir -p bin + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + - chmod +x bin/shellcheck +script: + - PATH=$PATH:$PWD/bin go test -v ./... + - go build diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 00000000..f326c7fa --- /dev/null +++ b/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,877 @@ +![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) + +Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
+ +Many of the most widely used Go projects are built using Cobra including: + +* [Kubernetes](http://kubernetes.io/) +* [Hugo](http://gohugo.io) +* [rkt](https://github.com/coreos/rkt) +* [etcd](https://github.com/coreos/etcd) +* [Docker (distribution)](https://github.com/docker/distribution) +* [OpenShift](https://www.openshift.com/) +* [Delve](https://github.com/derekparker/delve) +* [GopherJS](http://www.gopherjs.org/) +* [CockroachDB](http://www.cockroachlabs.com/) +* [Bleve](http://www.blevesearch.com/) +* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +* [Parse (CLI)](https://parse.com/) +* [GiantSwarm's swarm](https://github.com/giantswarm/cli) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) + + +[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) +[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) + +![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) + +# Overview + +Cobra is a library providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. + +Cobra is also an application that will generate your application scaffolding to rapidly +develop a Cobra-based application. + +Cobra provides: +* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. +* Fully POSIX-compliant flags (including short & long versions) +* Nested subcommands +* Global, local and cascading flags +* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` +* Intelligent suggestions (`app srver`... did you mean `app server`?) +* Automatic help generation for commands and flags +* Automatic detailed help for `app help [command]` +* Automatic help flag recognition of `-h`, `--help`, etc. +* Automatically generated bash autocomplete for your application +* Automatically generated man pages for your application +* Command aliases so you can change things without breaking them +* The flexibilty to define your own help, usage, etc. +* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps + +Cobra has an exceptionally clean interface and simple design without needless +constructors or initialization methods. + +Applications built with Cobra commands are designed to be as user-friendly as +possible. Flags can be placed before or after the command (as long as a +confusing space isn’t provided). Both short and long flags can be used. A +command need not even be fully typed. Help is automatically generated and +available for the application or for a specific command using either the help +command or the `--help` flag. + +# Concepts + +Cobra is built on a structure of commands, arguments & flags. + +**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. + +The best applications will read like sentences when used. Users will know how +to use the application because they will natively understand how to use it. + +The pattern to follow is +`APPNAME VERB NOUN --ADJECTIVE.` + or +`APPNAME COMMAND ARG --FLAG` + +A few good real world examples may better illustrate this point. + +In the following example, 'server' is a command, and 'port' is a flag: + + > hugo server --port=1313 + +In this command we are telling Git to clone the url bare. 
+ + > git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. + +A Command has the following structure: + +```go +type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. + Long string // The long message shown in the 'help ' output. + Run func(cmd *Command, args []string) // Run runs the command. +} +``` + +## Flags + +A Flag is a way to modify the behavior of a command. Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with its corresponding flags, then the +tree is assigned to the commander which is finally executed. + +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. This command will install the `cobra` generator executible +along with the library: + + > go get -v github.com/spf13/cobra/cobra + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Getting Started + +While you are welcome to provide your own organization, typically a Cobra based +application will follow the following organizational structure. + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +### cobra init + +The `cobra init [yourApp]` command will create your initial application code +for you. It is a very powerful application that will populate your program with +the right structure so you can immediately enjoy all the benefits of Cobra. It +will also automatically apply the license you specify to your application. + +Cobra init is pretty smart. You can provide it a full path, or simply a path +similar to what is expected in the import. + +``` +cobra init github.com/spf13/newAppName +``` + +### cobra add + +Once an application is initialized Cobra can create additional commands for you. 
+Let's say you created an app and you wanted the following commands for it: + +* app serve +* app config +* app config create + +In your project directory (where your main.go file is) you would run the following: + +``` +cobra add serve +cobra add config +cobra add create -p 'configCmd' +``` + +Once you have run these three commands you would have an app structure that would look like: + +``` + ▾ app/ + ▾ cmd/ + serve.go + config.go + create.go + main.go +``` + +at this point you can run `go run main.go` and it would run your app. `go run +main.go serve`, `go run main.go config`, `go run main.go config create` along +with `go run main.go help serve`, etc would all work. + +Obviously you haven't added your own code to these yet, the commands are ready +for you to give them their tasks. Have fun. + +### Configuring the cobra generator + +The cobra generator will be easier to use if you provide a simple configuration +file which will help you eliminate providing a bunch of repeated information in +flags over and over. + +an example ~/.cobra.yaml file: + +```yaml +author: Steve Francia +license: MIT +``` + +## Manually implementing Cobra + +To manually implement cobra you need to create a bare main.go file and a RootCmd file. +You will optionally provide additional commands as you see fit. + +### Create the root command + +The root command represents your binary itself. + + +#### Manually create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var RootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} +``` + +You will additionally define flags and handle configuration in your init() function. + +for example cmd/root.go: + +```go +func init() { + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. 
+ +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "github.com/spf13/cobra" +) + +func init() { + RootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Attach command to its parent + + +If you notice in the above example we attach the command to its parent. In +this case the parent is the rootCmd. In this example we are attaching it to the +root, but commands can be attached at any level. + +```go +RootCmd.AddCommand(versionCmd) +``` + +### Remove a command from its parent + +Removing a command is not a common action in simple programs, but it allows 3rd +parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing +root command, and we replace it with our own version: + +```go +mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) +mainlib.RootCmd.AddCommand(versionCmd) +``` + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + +```go +RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. 
+ Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + > hugo help + + hugo is the main command, used to build your Hugo site. + + Hugo is a Fast and Flexible Static Site Generator + built with love by spf13 and friends in Go. + + Complete documentation is available at http://gohugo.io/. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + + Use "hugo [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use. + +The default help command is + +```go +func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) +} +``` + +You can provide your own command, function or template through the following methods: + +```go +command.SetHelpCommand(cmd *Command) + +command.SetHelpFunc(f func(*Command, []string)) + +command.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. + +The default usage function is: + +```go +return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err +} +``` + +Like help, the function and template are overridable through public methods: + +```go +command.SetUsageFunc(f func(*Command) error) + +command.SetUsageTemplate(s string) +``` + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + _ = rootCmd.Execute() + fmt.Print("\n") + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + _ = rootCmd.Execute() +} +``` + + +## Alternative Error Handling + +Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top, +providing a way to handle the errors in one location. The current list of functions that return an error is: + +* PersistentPreRunE +* PreRunE +* RunE +* PostRunE +* PersistentPostRunE + +If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage` +and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent +command. + +**Example Usage using RunE:** + +```go +package main + +import ( + "errors" + "log" + + "github.com/spf13/cobra" +) + +func main() { + var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + RunE: func(cmd *cobra.Command, args []string) error { + // Do Stuff Here + return errors.New("some random error") + }, + } + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. 
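+
+As a minimal sketch (using nothing beyond the `cobra` package itself), a program like the following is enough to reproduce the suggestion behaviour described above; running the resulting binary as `hugo srever` prints the "unknown command" error together with the `server` suggestion:
+
+```go
+package main
+
+import "github.com/spf13/cobra"
+
+func main() {
+    rootCmd := &cobra.Command{Use: "hugo"}
+
+    serverCmd := &cobra.Command{
+        Use:   "server",
+        Short: "Hugo runs its own webserver to render the files",
+        Run:   func(cmd *cobra.Command, args []string) {},
+    }
+
+    rootCmd.AddCommand(serverCmd)
+
+    // A typo such as `hugo srever` is within the default Levenshtein
+    // distance of 2 of "server", so Cobra suggests it automatically.
+    rootCmd.Execute()
+}
+```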
+ +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating Markdown-formatted documentation for your command + +Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). + +## Generating man pages for your command + +Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). + +## Generating bash completions for your command + +Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). + +## Debugging + +Cobra provides a ‘DebugFlags’ method on a command which, when called, will print +out everything Cobra knows about the flags for each command. + +### Example + +```go +command.DebugFlags() +``` + +## Release Notes +* **0.9.0** June 17, 2014 + * flags can appears anywhere in the args (provided they are unambiguous) + * --help prints usage screen for app or command + * Prefix matching for commands + * Cleaner looking help and usage output + * Extensive test suite +* **0.8.0** Nov 5, 2013 + * Reworked interface to remove commander completely + * Command now primary structure + * No initialization needed + * Usage & Help templates & functions definable at any level + * Updated Readme +* **0.7.0** Sept 24, 2013 + * Needs more eyes + * Test suite + * Support for automatic error messages + * Support for help command + * Support for printing to any io.Writer instead of os.Stderr + * Support for persistent flags which cascade down tree + * Ready for integration into Hugo +* **0.1.0** Sept 3, 2013 + * Implement first draft + +## Extensions + +Libraries for extending Cobra: + +* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. + +## ToDo +* Launch proper documentation site + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13), +[eparis](https://github.com/eparis), +[bep](https://github.com/bep), and many more! + +## License + +Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 00000000..3f33bb0e --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,602 @@ +package cobra + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func preamble(out io.Writer, name string) error { + _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) + if err != nil { + return err + } + _, err = fmt.Fprint(out, ` +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __index_of_word "${flag}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + COMPREPLY=() + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zfs completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions=("${must_have_one_flag[@]}") + elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + else + completions=("${commands[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; 
then + declare -F __custom_func >/dev/null && __custom_func + fi + + __ltrim_colon_completions "$cur" +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__handle_flag() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME[0]}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # keep flag value with flagname as flaghash + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_$(basename "${words[c]//:/__}")" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) + return err +} + +func postscript(w io.Writer, name string) error { + name = strings.Replace(name, ":", "__", -1) + _, err := fmt.Fprintf(w, "__start_%s()\n", name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __my_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then + complete -o 
default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") + return err +} + +func writeCommands(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { + return err + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { + return err + } + } + _, err := fmt.Fprintf(w, "\n") + return err +} + +func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + + if len(value) > 0 { + ext := "__handle_filename_extension_flag " + strings.Join(value, "|") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + case BashCompCustom: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + if len(value) > 0 { + handlers := strings.Join(value, "; ") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) + } else { + _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") + } + if err != nil { + return err + } + case BashCompSubdirsInDir: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + + if len(value) == 1 { + ext := "__handle_subdirs_in_dir_flag " + value[0] + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir -d" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + } + } + return nil +} + +func writeShortFlag(flag *pflag.Flag, w io.Writer) error { + b := (flag.Value.Type() == "bool") + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("-"+name, flag.Annotations, w) +} + +func writeFlag(flag *pflag.Flag, w io.Writer) error { + b := (flag.Value.Type() == "bool") + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("--"+name, flag.Annotations, w) +} + +func writeFlags(cmd *Command, w io.Writer) error { + _, err := fmt.Fprintf(w, ` flags=() + two_word_flags=() + flags_with_completion=() + flags_completion=() + +`) + if err != nil { + return err + } + var visitErr error + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + + _, err = fmt.Fprintf(w, "\n") + return err +} + +func writeRequiredFlag(cmd *Command, w io.Writer) error { + if _, err := 
fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { + return err + } + flags := cmd.NonInheritedFlags() + var visitErr error + flags.VisitAll(func(flag *pflag.Flag) { + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { + visitErr = err + return + } + + if len(flag.Shorthand) > 0 { + if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { + visitErr = err + return + } + } + } + } + }) + return visitErr +} + +func writeRequiredNouns(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func writeArgAliases(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ArgAliases)) + for _, value := range cmd.ArgAliases { + if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func gen(cmd *Command, w io.Writer) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if err := gen(c, w); err != nil { + return err + } + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { + return err + } + if err := writeCommands(cmd, w); err != nil { + return err + } + if err := writeFlags(cmd, w); err != nil { + return err + } + if err := writeRequiredFlag(cmd, w); err != nil { + return err + } + if err := writeRequiredNouns(cmd, w); err != nil { + return err + } + if err := writeArgAliases(cmd, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { + return err + } + return nil +} + +func (cmd *Command) GenBashCompletion(w io.Writer) error { + if err := preamble(w, cmd.Name()); err != nil { + return err + } + if len(cmd.BashCompletionFunction) > 0 { + if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { + return err + } + } + if err := gen(cmd, w); err != nil { + return err + } + return postscript(w, cmd.Name()) +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return cmd.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. +func (cmd *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(cmd.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. +func (cmd *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(cmd.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. 
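+// In the generated completion script the flag is added to must_have_one_flag, so completion keeps offering it until it has been supplied on the command line.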
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (cmd *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(cmd.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 00000000..84d5b012 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,206 @@ +# Generating Bash Completions For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: + +```go +package main + +import ( + "io/ioutil" + "os" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" +) + +func main() { + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") +} +``` + +That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. + +## Creating your own custom functions + +Some more actual code that works in kubernetes: + +```bash +const ( + bash_completion_func = `__kubectl_parse_get() +{ + local kubectl_output out + if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then + out=($(echo "${kubectl_output}" | awk '{print $1}')) + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__kubectl_get_resource() +{ + if [[ ${#nouns[@]} -eq 0 ]]; then + return 1 + fi + __kubectl_parse_get ${nouns[${#nouns[@]} -1]} + if [[ $? 
-eq 0 ]]; then + return 0 + fi +} + +__custom_func() { + case ${last_command} in + kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) + __kubectl_get_resource + return + ;; + *) + ;; + esac +} +`) +``` + +And then I set that in my command definition: + +```go +cmds := &cobra.Command{ + Use: "kubectl", + Short: "kubectl controls the Kubernetes cluster manager", + Long: `kubectl controls the Kubernetes cluster manager. + +Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, + Run: runHelp, + BashCompletionFunction: bash_completion_func, +} +``` + +The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! + +## Have the completions code complete your 'nouns' + +In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: + +```go +validArgs []string = { "pod", "node", "service", "replicationcontroller" } + +cmd := &cobra.Command{ + Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", + Short: "Display one or many resources", + Long: get_long, + Example: get_example, + Run: func(cmd *cobra.Command, args []string) { + err := RunGet(f, out, cmd, args) + util.CheckErr(err) + }, + ValidArgs: validArgs, +} +``` + +Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like + +```bash +# kubectl get [tab][tab] +node pod replicationcontroller service +``` + +## Plural form and shortcuts for nouns + +If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: + +```go` +argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } + +cmd := &cobra.Command{ + ... + ValidArgs: validArgs, + ArgAliases: argAliases +} +``` + +The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by +the completion aglorithm if entered manually, e.g. in: + +```bash +# kubectl get rc [tab][tab] +backend frontend database +``` + +Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns +in this example again instead of the replication controllers. + +## Mark flags as required + +Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. 
+ +```go +cmd.MarkFlagRequired("pod") +cmd.MarkFlagRequired("container") +``` + +and you'll get something like + +```bash +# kubectl exec [tab][tab][tab] +-c --container= -p --pod= +``` + +# Specify valid filename extensions for flags that take a filename + +In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. + +```go + annotations := []string{"json", "yaml", "yml"} + annotation := make(map[string][]string) + annotation[cobra.BashCompFilenameExt] = annotations + + flag := &pflag.Flag{ + Name: "filename", + Shorthand: "f", + Usage: usage, + Value: value, + DefValue: value.String(), + Annotations: annotation, + } + cmd.Flags().AddFlag(flag) +``` + +Now when you run a command with this filename flag you'll get something like + +```bash +# kubectl create -f +test/ example/ rpmbuild/ +hello.yml test.json +``` + +So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. + +# Specifiy custom flag completion + +Similar to the filename completion and filtering usingn cobra.BashCompFilenameExt, you can specifiy +a custom flag completion function with cobra.BashCompCustom: + +```go + annotation := make(map[string][]string) + annotation[cobra.BashCompFilenameExt] = []string{"__kubectl_get_namespaces"} + + flag := &pflag.Flag{ + Name: "namespace", + Usage: usage, + Annotations: annotation, + } + cmd.Flags().AddFlag(flag) +``` + +In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` +value, e.g.: + +```bash +__kubectl_get_namespaces() +{ + local template + template="{{ range .items }}{{ .metadata.name }} {{ end }}" + local kubectl_out + if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then + COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) + fi +} +``` diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 00000000..0c4e2e5d --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,171 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" + "unicode" +) + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() + +// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it +var EnablePrefixMatching = false + +//AddTemplateFunc adds a template function that's available to Usage and Help +//template generation. 
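+//The registered function can then be referenced from templates passed to SetUsageTemplate or SetHelpTemplate.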
+func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +//AddTemplateFuncs adds multiple template functions availalble to Usage and +//Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +//OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + for _, x := range y { + initializers = append(initializers, x) + } +} + +//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +//ints and then compared. +func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +//rpad adds padding to the right of a string +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. 
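+// The text is parsed with the package-level templateFuncs installed; template.Must panics if the text does not parse.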
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 00000000..0efeeb19 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1193 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// eg. 'go run' ... 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Name is the command name, usually the executable's name. + name string + // The one-line usage message. + Use string + // An array of aliases that can be used instead of the first word in Use. + Aliases []string + // An array of command names for which this command will be suggested - similar to aliases but only suggests. + SuggestFor []string + // The short description shown in the 'help' output. + Short string + // The long message shown in the 'help ' output. + Long string + // Examples of how to use the command + Example string + // List of all valid non-flag arguments that are accepted in bash completions + ValidArgs []string + // List of aliases for ValidArgs. These are not suggested to the user in the bash + // completion, but accepted if entered manually. + ArgAliases []string + // Custom functions used by the bash autocompletion generator + BashCompletionFunction string + // Is this command deprecated and should print this string when used? + Deprecated string + // Is this command hidden and should NOT show up in the list of available commands? + Hidden bool + // Full set of flags + flags *flag.FlagSet + // Set of flags childrens of this command will inherit + pflags *flag.FlagSet + // Flags that are declared specifically by this command (not inherited). 
+ lflags *flag.FlagSet + // SilenceErrors is an option to quiet errors down stream + SilenceErrors bool + // Silence Usage is an option to silence usage when an error occurs. + SilenceUsage bool + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name + // PersistentPreRun: children of this command will inherit and execute + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this + Run func(cmd *Command, args []string) + // RunE: Run but returns an error + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error + PersistentPostRunE func(cmd *Command, args []string) error + // DisableAutoGenTag remove + DisableAutoGenTag bool + // Commands is the list of commands supported by this program. + commands []*Command + // Parent Command for this command + parent *Command + // max lengths of commands' string lengths for use in padding + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + flagErrorBuf *bytes.Buffer + + args []string // actual args parsed from flags + output *io.Writer // nil means stderr; use Out() method instead + usageFunc func(*Command) error // Usage can be defined by application + usageTemplate string // Can be defined by Application + helpTemplate string // Can be defined by Application + helpFunc func(*Command, []string) // Help can be defined by application + helpCommand *Command // The help command + // The global normalization function that we can use on every pFlag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages + DisableSuggestions bool + // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 + SuggestionsMinimumDistance int +} + +// os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return *c.output + } + + if c.HasParent() { + return c.parent.Out() + } + return def +} + +func (c *Command) Out() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. 
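To show how the fields and *Run hooks above fit together, a hypothetical "serve" command could be declared like this (the command names and messages are made up for illustration):

    rootCmd := &cobra.Command{Use: "example"}
    serveCmd := &cobra.Command{
        Use:   "serve",
        Short: "Run the example server",
        PersistentPreRun: func(cmd *cobra.Command, args []string) {
            // inherited by children; runs before PreRun and Run on every invocation
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            return nil // the actual work goes here; a non-nil error is reported by Execute
        },
        SilenceUsage: true, // do not print usage text on runtime errors
    }
    rootCmd.AddCommand(serveCmd)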
+func (c *Command) SetOutput(output io.Writer) { + c.output = &output +} + +// Usage can be defined by application +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// Can be defined by Application +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// Can be defined by Application +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// Can be defined by Application +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + + if c.HasParent() { + return c.parent.UsageFunc() + } + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + if err != nil { + fmt.Print(err) + } + return err + } +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function which calls c.Help() +func (c *Command) HelpFunc() func(*Command, []string) { + cmd := c + for cmd != nil { + if cmd.helpFunc != nil { + return cmd.helpFunc + } + cmd = cmd.parent + } + return func(*Command, []string) { + err := c.Help() + if err != nil { + c.Println(err) + } + } +} + +var minUsagePadding = 25 + +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{if .HasFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasSubCommands}} + {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} +{{end}}{{if .HasExample}} + +Examples: +{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasSubCommands }} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` +} + +func (c 
*Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with or .Long .Short }}{{. | trim}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// Really only used when casting a command to a commander +func (c *Command) resetChildrensParents() { + for _, x := range c.commands { + x.parent = c + } +} + +// Test if the named flag is a boolean flag. +func isBooleanFlag(name string, f *flag.FlagSet) bool { + flag := f.Lookup(name) + if flag == nil { + return false + } + return flag.Value.Type() == "bool" +} + +// Test if the named flag is a boolean flag. +func isBooleanShortFlag(name string, f *flag.FlagSet) bool { + result := false + f.VisitAll(func(f *flag.Flag) { + if f.Shorthand == name && f.Value.Type() == "bool" { + result = true + } + }) + return result +} + +func stripFlags(args []string, c *Command) []string { + if len(args) < 1 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + + inQuote := false + inFlag := false + for _, y := range args { + if !inQuote { + switch { + case strings.HasPrefix(y, "\""): + inQuote = true + case strings.Contains(y, "=\""): + inQuote = true + case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !isBooleanFlag(y[2:], c.Flags()) + case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): + inFlag = true + case inFlag: + inFlag = false + case y == "": + // strip empty commands, as the go tests expect this to be ok.... + case !strings.HasPrefix(y, "-"): + commands = append(commands, y) + inFlag = false + } + } + + if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { + inQuote = false + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) + return ret + } + } + return args +} + +// find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. 
+func (c *Command) Find(args []string) (*Command, []string, error) { + if c == nil { + return nil, nil, fmt.Errorf("Called find() on a nil Command") + } + + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == nextSubCmd || cmd.HasAlias(nextSubCmd) { // exact name or alias match + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + } + if EnablePrefixMatching { + if strings.HasPrefix(cmd.Name(), nextSubCmd) { // prefix match + matches = append(matches, cmd) + } + for _, x := range cmd.Aliases { + if strings.HasPrefix(x, nextSubCmd) { + matches = append(matches, cmd) + } + } + } + } + + // only accept a single prefix match - multiple matches would be ambiguous + if len(matches) == 1 { + return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + } + + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + argsWOflags := stripFlags(a, commandFound) + + // no subcommand, always take args + if !commandFound.HasSubCommands() { + return commandFound, a, nil + } + + // root command with subcommands, do subcommand checking + if commandFound == c && len(argsWOflags) > 0 { + suggestionsString := "" + if !c.DisableSuggestions { + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + } + return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) + } + + return commandFound, a, nil +} + +func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +func (c *Command) VisitParents(fn func(*Command)) { + var traverse func(*Command) *Command + + traverse = func(x *Command) *Command { + if x != c { + fn(x) + } + if x.HasParent() { + return traverse(x.parent) + } + return x + } + traverse(c) +} + +func (c *Command) Root() *Command { + var findRoot func(*Command) *Command + + findRoot = func(x *Command) *Command { + if x.HasParent() { + return findRoot(x.parent) + } + return x + } + + return findRoot(c) +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. (Description from +// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). 
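A small, hypothetical example of the suggestion behaviour implemented by Find and SuggestionsFor: with the default SuggestionsMinimumDistance of 2, a mistyped subcommand such as "stats" is close enough to "status" (Levenshtein distance 1, and also a prefix match) to be suggested:

    root := &cobra.Command{Use: "app"}
    root.AddCommand(&cobra.Command{
        Use: "status",
        Run: func(cmd *cobra.Command, args []string) {},
    })
    root.SetArgs([]string{"stats"})
    // Prints: unknown command "stats" for "app" ... Did you mean this? status
    _ = root.Execute()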
+func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help flag as the last point possible to allow for user + // overriding + c.initHelpFlag() + + err = c.ParseFlags(a) + if err != nil { + return err + } + // If help is called, regardless of other flags, return we want help + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in initHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + if helpVal || !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + argWoFlags := c.Flags().Args() + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) errorMsgFromParse() string { + s := c.flagErrorBuf.String() + + x := strings.Split(s, "\n") + + if len(x) > 0 { + return x[0] + } + return "" +} + +// Call execute to use the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. 
+func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +func (c *Command) ExecuteC() (cmd *Command, err error) { + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help as the last point possible to allow for user + // overriding + c.initHelpCmd() + + var args []string + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } else { + args = c.args + } + + cmd, flags, err := c.Find(args) + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if err == flag.ErrHelp { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + return cmd, err + } + return cmd, nil +} + +func (c *Command) initHelpFlag() { + if c.Flags().Lookup("help") == nil { + c.Flags().BoolP("help", "h", false, "help for "+c.Name()) + } +} + +func (c *Command) initHelpCmd() { + if c.helpCommand == nil { + if !c.HasSubCommands() { + return + } + + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + PersistentPreRun: func(cmd *Command, args []string) {}, + PersistentPostRun: func(cmd *Command, args []string) {}, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + c.Root().Usage() + } else { + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + } + }, + } + } + c.AddCommand(c.helpCommand) +} + +// Used for testing +func (c *Command) ResetCommands() { + c.commands = nil + c.helpCommand = nil +} + +//Commands returns a slice of child commands. +func (c *Command) Commands() []*Command { + return c.commands +} + +// AddCommand adds one or more commands to this parent command. +func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + } +} + +// AddCommand removes one or more commands from a parent command. 
+func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Print is a convenience method to Print to the defined output +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.Out(), i...) +} + +// Println is a convenience method to Println to the defined output +func (c *Command) Println(i ...interface{}) { + str := fmt.Sprintln(i...) + c.Print(str) +} + +// Printf is a convenience method to Printf to the defined output +func (c *Command) Printf(format string, i ...interface{}) { + str := fmt.Sprintf(format, i...) + c.Print(str) +} + +// Output the usage for the command +// Used when a user provides invalid input +// Can be defined by user by overriding UsageFunc +func (c *Command) Usage() error { + c.mergePersistentFlags() + err := c.UsageFunc()(c) + return err +} + +// Output the help for the command +// Used when a user calls help [command] +// by the default HelpFunc in the commander +func (c *Command) Help() error { + c.mergePersistentFlags() + err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) + return err +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// CommandPath returns the full path to this command. 
+func (c *Command) CommandPath() string { + str := c.Name() + x := c + for x.HasParent() { + str = x.parent.Name() + " " + str + x = x.parent + } + return str +} + +//The full usage for a given command (including parents) +func (c *Command) UseLine() string { + str := "" + if c.HasParent() { + str = c.parent.CommandPath() + " " + } + return str + c.Use +} + +// For use in determining which flags have been assigned to which commands +// and which persist +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() { + if x.persistentFlag(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + if c.name != "" { + return c.name + } + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. 
+func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands) +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsHelpCommand determines if a command is a 'help' command; a help command is +// determined by the fact that it is NOT runnable/hidden/deprecated, and has no +// sub commands that are runnable/hidden/deprecated +func (c *Command) IsHelpCommand() bool { + + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsHelpCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any avilable 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics' +func (c *Command) HasHelpSubCommands() bool { + + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsHelpCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands' +func (c *Command) HasAvailableSubCommands() bool { + + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// Determine if the command is a child command +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + return c.flags +} + +// Get the local FlagSet specifically set in the current command +func (c *Command) LocalFlags() *flag.FlagSet { + 
c.mergePersistentFlags() + + local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags.VisitAll(func(f *flag.Flag) { + local.AddFlag(f) + }) + if !c.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if local.Lookup(f.Name) == nil { + local.AddFlag(f) + } + }) + } + return local +} + +// All Flags which were inherited from parents commands +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + local := c.LocalFlags() + + var rmerge func(x *Command) + + rmerge = func(x *Command) { + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + inherited.AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + if c.HasParent() { + rmerge(c.parent) + } + + return inherited +} + +// All Flags which were not inherited from parent commands +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// Get the Persistent FlagSet specifically set in the current command +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// For use in testing +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) +} + +// Does the command contain any flags (local plus persistent from the entire structure) +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// Does the command contain persistent flags +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// Does the command has flags specifically declared locally +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// Flag climbs up the command tree looking for matching flag +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// recursively find matching persistent flag +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil && c.HasParent() { + flag = c.parent.persistentFlag(name) + } + return +} + +// ParseFlags parses persistent flag tree & local flags +func (c *Command) ParseFlags(args []string) (err error) { + c.mergePersistentFlags() + err = c.Flags().Parse(args) + return +} + +// Parent returns a commands parent command +func (c *Command) Parent() *Command { + return c.parent +} + +func (c *Command) mergePersistentFlags() { + var rmerge func(x *Command) + + // Save the set of local flags + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + addtolocal := func(f *flag.Flag) { + c.lflags.AddFlag(f) + } + c.Flags().VisitAll(addtolocal) + c.PersistentFlags().VisitAll(addtolocal) + } + rmerge = func(x *Command) { + if !x.HasParent() { + 
flag.CommandLine.VisitAll(func(f *flag.Flag) { + if x.PersistentFlags().Lookup(f.Name) == nil { + x.PersistentFlags().AddFlag(f) + } + }) + } + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if c.Flags().Lookup(f.Name) == nil { + c.Flags().AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + rmerge(c) +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 00000000..6159c1cc --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 00000000..4b0eaa1b --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,26 @@ +// +build windows + +package cobra + +import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. +` + +func preExecHook(c *Command) { + if mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go new file mode 100644 index 00000000..e3170e33 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + // TODO(djd): Respect any existing value of req.Cancel. + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go new file mode 100644 index 00000000..56bcbadb --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 00000000..26a5e19a --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. 
+package ctxhttp + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. + cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + cancel() + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancel() + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. 
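A minimal sketch of calling this package with a deadline, using a throwaway URL; passing a nil client falls back to http.DefaultClient as documented above:

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    resp, err := ctxhttp.Get(ctx, nil, "https://example.com/") // nil client => http.DefaultClient
    if err != nil {
        log.Fatal(err) // err is ctx.Err() if the deadline expired first
    }
    defer resp.Body.Close()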
+type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 00000000..a035125c --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..46aa2b12 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 00000000..d02f24fd --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The oauth2 Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 00000000..0d514173 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,64 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + + +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package + +This means its no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" and well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package. 
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 00000000..4a554cb9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 00000000..65dc3473 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,83 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. 
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 00000000..2f9b1543 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000..78f80898 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// (In this final case any provided scopes are ignored.) 
+// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 00000000..2077d986 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,145 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
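For orientation, a hypothetical caller of the Application Default Credentials flow above might look like this; the scope and request URL are placeholders only:

    ctx := context.Background()
    client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
    if err != nil {
        log.Fatal(err)
    }
    resp, err := client.Get("https://www.googleapis.com/storage/v1/b/example-bucket")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()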
+ +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +package google + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloadable from https://console.developers.google.com, +// under "APIs & Auth" > "Credentials". Download the Web application credentials in the +// JSON format and provide the contents of the file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" page under "APIs & Auth" for your +// project at https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var key struct { + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(jsonKey, &key); err != nil { + return nil, err + } + return &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + Scopes: scope, + TokenURL: JWTTokenURL, + }, nil +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. 
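A sketch of the service-account (two-legged JWT) flow via JWTConfigFromJSON; the key file path and scope here are placeholders:

    data, err := ioutil.ReadFile("service-account.json")
    if err != nil {
        log.Fatal(err)
    }
    conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
    if err != nil {
        log.Fatal(err)
    }
    client := conf.Client(oauth2.NoContext)
    _ = client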
+func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000..01ba0ecb --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. 
+var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + usr, err := user.Current() + if err == nil { + return usr.HomeDir + } + return os.Getenv("HOME") +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 00000000..dc8ebfc4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,76 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000..ea6716c9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,213 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://www.googleapis.com/", + "https://github.com/", + "https://api.instagram.com/", + "https://www.douban.com/", + "https://api.dropbox.com/", + "https://api.soundcloud.com/", + "https://www.linkedin.com/", + "https://api.twitch.tv/", + "https://oauth.vk.com/", + "https://api.odnoklassniki.ru/", + "https://connect.stripe.com/", + "https://api.pushbullet.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://www.strava.com/oauth/", + "https://app.box.com/", + "https://test-sandbox.auth.corp.google.com", + "https://user.gini.net/", +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. 
+// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000..521e7b49 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,67 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 00000000..396b3fac --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,160 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides encoding and decoding utilities for +// signed JWS messages. +package jws + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion + Iat int64 `json:"iat"` // the time the assertion was issued. + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. 
(Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` + + exp time.Time + iat time.Time +} + +func (c *ClaimSet) encode() (string, error) { + if c.exp.IsZero() || c.iat.IsZero() { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + c.iat = now + c.exp = now.Add(time.Hour) + } + + c.Exp = c.exp.Unix() + c.Iat = c.iat.Unix() + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64Encode(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64Encode(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64Encode(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64Decode(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Encode encodes a signed JWS with provided header and claim set. +func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + h := sha256.New() + h.Write([]byte(ss)) + b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) + if err != nil { + return "", err + } + sig := base64Encode(b) + return fmt.Sprintf("%s.%s", ss, sig), nil +} + +// base64Encode returns and Base64url encoded version of the input string with any +// trailing "=" stripped. 
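+//
+// A round-trip sketch (editor's note): base64Encode and base64Decode wrap the
+// standard library calls below, stripping padding on encode and restoring it
+// before decode.
+//
+//	base64.URLEncoding.EncodeToString([]byte("ab")) // "YWI=", stored here as "YWI"
+//	base64.URLEncoding.DecodeString("YWI" + "=")    // yields []byte("ab") again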
+func base64Encode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// base64Decode decodes the Base64url encoded string +func base64Decode(s string) ([]byte, error) { + // add back missing padding + switch len(s) % 4 { + case 2: + s += "==" + case 3: + s += "=" + } + return base64.URLEncoding.DecodeString(s) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 00000000..205d23ed --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,147 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. 
+ claimSet.Prn = subject + } + payload, err := jws.Encode(defaultHeader, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..dfcf238d --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,325 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. 
+ Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. 
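+//
+// A usage sketch (editor's illustration, not part of the upstream source; the
+// endpoint and credential values are placeholders):
+//
+//	conf := &oauth2.Config{
+//		ClientID:     "client-id",
+//		ClientSecret: "client-secret",
+//		Endpoint:     oauth2.Endpoint{TokenURL: "https://provider.example.com/token"},
+//	}
+//	tok, err := conf.PasswordCredentialsToken(oauth2.NoContext, "username", "password")
+//	if err != nil {
+//		// handle error
+//	}
+//	client := conf.Client(oauth2.NoContext, tok)
+//	_ = client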
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
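+//
+// Caller-side sketch (editor's illustration, not part of the upstream source;
+// conf and savedToken stand for a previously built *Config and a previously
+// persisted *Token):
+//
+//	ts := conf.TokenSource(oauth2.NoContext, savedToken)
+//	tok, err := ts.Token() // returns savedToken while valid, refreshes it otherwise
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = tok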
+func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 00000000..ebbdddbd --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,143 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the crendentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. 
+// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if vals, ok := t.raw.(url.Values); ok { + // TODO(jbd): Cast numeric values to int64 or float64. + return vals.Get(key) + } + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + return nil +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. 
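+//
+// For callers that build requests by hand (editor's sketch about the exported
+// Token type above; the URL is a placeholder), SetAuthHeader attaches a token
+// without going through Transport:
+//
+//	req, _ := http.NewRequest("GET", "https://api.example.com/resource", nil)
+//	tok.SetAuthHeader(req) // sets "Authorization: Bearer <access token>"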
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 00000000..90db0883 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/google.golang.org/cloud/LICENSE b/vendor/google.golang.org/cloud/LICENSE new file mode 100644 index 00000000..a4c5efd8 --- /dev/null +++ b/vendor/google.golang.org/cloud/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/google.golang.org/cloud/compute/metadata/metadata.go new file mode 100644 index 00000000..0a709598 --- /dev/null +++ b/vendor/google.golang.org/cloud/compute/metadata/metadata.go @@ -0,0 +1,382 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" + + "google.golang.org/cloud/internal" +) + +// metadataIP is the documented metadata server IP address. +const metadataIP = "169.254.169.254" + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }, + } + subscribeClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". 
+// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. +func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + onGCE.v = testOnGCE() + return onGCE.v +} + +func testOnGCE() bool { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. 
+ // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + return <-resc +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := getETag(subscribeClient, suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". 
+func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/google.golang.org/cloud/internal/cloud.go new file mode 100644 index 00000000..59428803 --- /dev/null +++ b/vendor/google.golang.org/cloud/internal/cloud.go @@ -0,0 +1,128 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. 
+package internal + +import ( + "fmt" + "net/http" + "sync" + + "golang.org/x/net/context" +) + +type contextKey struct{} + +func WithContext(parent context.Context, projID string, c *http.Client) context.Context { + if c == nil { + panic("nil *http.Client passed to WithContext") + } + if projID == "" { + panic("empty project ID passed to WithContext") + } + return context.WithValue(parent, contextKey{}, &cloudContext{ + ProjectID: projID, + HTTPClient: c, + }) +} + +const userAgent = "gcloud-golang/0.1" + +type cloudContext struct { + ProjectID string + HTTPClient *http.Client + + mu sync.Mutex // guards svc + svc map[string]interface{} // e.g. "storage" => *rawStorage.Service +} + +// Service returns the result of the fill function if it's never been +// called before for the given name (which is assumed to be an API +// service name, like "datastore"). If it has already been cached, the fill +// func is not run. +// It's safe for concurrent use by multiple goroutines. +func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { + return cc(ctx).service(name, fill) +} + +func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { + c.mu.Lock() + defer c.mu.Unlock() + + if c.svc == nil { + c.svc = make(map[string]interface{}) + } else if v, ok := c.svc[name]; ok { + return v + } + v := fill(c.HTTPClient) + c.svc[name] = v + return v +} + +// Transport is an http.RoundTripper that appends +// Google Cloud client's user-agent to the original +// request's user-agent header. +type Transport struct { + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +func ProjID(ctx context.Context) string { + return cc(ctx).ProjectID +} + +func HTTPClient(ctx context.Context) *http.Client { + return cc(ctx).HTTPClient +} + +// cc returns the internal *cloudContext (cc) state for a context.Context. +// It panics if the user did it wrong. +func cc(ctx context.Context) *cloudContext { + if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { + return c + } + panic("invalid context.Context type; it should be created with cloud.NewContext") +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go new file mode 100644 index 00000000..0b03ff9b --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package federation + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_federation_Cluster, + DeepCopy_federation_ClusterCondition, + DeepCopy_federation_ClusterList, + DeepCopy_federation_ClusterSpec, + DeepCopy_federation_ClusterStatus, + DeepCopy_federation_ServerAddressByClientCIDR, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_federation_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_federation_ClusterSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_federation_ClusterStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_federation_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_federation_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Cluster, len(in)) + for i := range in { + if err := DeepCopy_federation_Cluster(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_federation_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_federation_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func 
DeepCopy_federation_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(in)) + for i := range in { + if err := DeepCopy_federation_ClusterCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.Zones != nil { + in, out := in.Zones, &out.Zones + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Zones = nil + } + out.Region = in.Region + return nil +} + +func DeepCopy_federation_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go b/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go new file mode 100644 index 00000000..a1dc24ca --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/federation/apis/federation/v1beta1" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/federation/apis/federation" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", federation.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) 
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString( + "Cluster", + ) + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1beta1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(federation.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + federation.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1beta1.SchemeGroupVersion: + v1beta1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/register.go new file mode 100644 index 00000000..2cc7f1f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federation + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "federation" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Cluster{}, + &ClusterList{}, + &api.ListOptions{}, + &api.DeleteOptions{}, + ) +} + +func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.go new file mode 100644 index 00000000..10a9d8be --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/types.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// ClusterSpec describes the attributes of a kubernetes cluster. +type ClusterSpec struct { + // A map of client CIDR to server address. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR"` + // Name of the secret containing kubeconfig to access this cluster. 
+ // The secret is read from the kubernetes cluster that is hosting federation control plane. + // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". + // This will later be changed to a reference to a secret in the federation control plane when the federation control plane supports secrets. + // This can be left empty if the cluster allows insecure access. + SecretRef *api.LocalObjectReference `json:"secretRef,omitempty"` +} + +type ClusterConditionType string + +// These are valid conditions of a cluster. +const ( + // ClusterReady means the cluster is ready to accept workloads. + ClusterReady ClusterConditionType = "Ready" + // ClusterOffline means the cluster is temporarily down or not reachable. + ClusterOffline ClusterConditionType = "Offline" +) + +// ClusterCondition describes current state of a cluster. +type ClusterCondition struct { + // Type of cluster condition, one of Ready or Offline. + Type ClusterConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus `json:"status"` + // Last time the condition was checked. + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` + // Last time the condition transitioned from one status to another. + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty"` +} + +// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. +type ClusterStatus struct { + // Conditions is an array of current cluster conditions. + Conditions []ClusterCondition `json:"conditions,omitempty"` + // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // These will always be in the same region. + Zones []string `json:"zones,omitempty"` + // Region is the name of the region in which all of the nodes in the cluster exist, e.g. 'us-east1'. + Region string `json:"region,omitempty"` +} + +// +genclient=true,nonNamespaced=true + +// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. +type Cluster struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + api.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the behavior of the Cluster. + Spec ClusterSpec `json:"spec,omitempty"` + // Status describes the current status of a Cluster. + Status ClusterStatus `json:"status,omitempty"` +} + +// A list of all the kubernetes clusters registered to the federation. +type ClusterList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty"` + + // List of Cluster objects.
+ Items []Cluster `json:"items"` +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go new file mode 100644 index 00000000..f8d396e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + err := api.Scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "Cluster", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go new file mode 100644 index 00000000..5877d008 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go @@ -0,0 +1,296 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + federation "k8s.io/kubernetes/federation/apis/federation" + api "k8s.io/kubernetes/pkg/api" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Cluster_To_federation_Cluster, + Convert_federation_Cluster_To_v1beta1_Cluster, + Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition, + Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition, + Convert_v1beta1_ClusterList_To_federation_ClusterList, + Convert_federation_ClusterList_To_v1beta1_ClusterList, + Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec, + Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec, + Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus, + Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus, + Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, + Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { + return autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s) +} + +func autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { + return autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s) +} + +func autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { + out.Type = federation.ClusterConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s) +} + +func autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + out.Type = ClusterConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + return autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) +} + +func autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]federation.Cluster, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s) +} + +func autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + 
return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + if err := Convert_federation_Cluster_To_v1beta1_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { + return autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s) +} + +func autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]federation.ServerAddressByClientCIDR, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(api.LocalObjectReference) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s) +} + +func autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + for i := range *in { + if err := Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { + return autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s) +} + +func autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]federation.ClusterCondition, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.Zones = in.Zones + out.Region = in.Region + return nil +} + +func Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s) +} + +func autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + if err := Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.Zones = in.Zones + out.Region = in.Region + return nil +} + +func Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + return autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) +} + +func autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} + +func Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { + return autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) +} + +func autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} + +func Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { + return autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go new file mode 100644 index 00000000..0d53b4a6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go @@ -0,0 +1,146 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_Cluster, + DeepCopy_v1beta1_ClusterCondition, + DeepCopy_v1beta1_ClusterList, + DeepCopy_v1beta1_ClusterSpec, + DeepCopy_v1beta1_ClusterStatus, + DeepCopy_v1beta1_ServerAddressByClientCIDR, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1beta1_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_ClusterSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_ClusterStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_v1beta1_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Cluster, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_Cluster(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1beta1_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { + if in.ServerAddressByClientCIDRs != nil { + in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.ServerAddressByClientCIDRs = nil + } + if in.SecretRef != nil { + in, out := in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + if err := v1.DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + return nil +} + +func 
DeepCopy_v1beta1_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(in)) + for i := range in { + if err := DeepCopy_v1beta1_ClusterCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.Zones != nil { + in, out := in.Zones, &out.Zones + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Zones = nil + } + out.Region = in.Region + return nil +} + +func DeepCopy_v1beta1_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { + out.ClientCIDR = in.ClientCIDR + out.ServerAddress = in.ServerAddress + return nil +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go new file mode 100644 index 00000000..b096ce2f --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go new file mode 100644 index 00000000..cfdb87c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go new file mode 100644 index 00000000..e9193b60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go @@ -0,0 +1,1398 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto + + It has these top-level messages: + Cluster + ClusterCondition + ClusterList + ClusterSpec + ClusterStatus + ServerAddressByClientCIDR +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} + +func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } +func (m *ClusterCondition) String() string { return proto.CompactTextString(m) } +func (*ClusterCondition) ProtoMessage() {} + +func (m *ClusterList) Reset() { *m = ClusterList{} } +func (m *ClusterList) String() string { return proto.CompactTextString(m) } +func (*ClusterList) ProtoMessage() {} + +func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } +func (m *ClusterSpec) String() string { return proto.CompactTextString(m) } +func (*ClusterSpec) ProtoMessage() {} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } +func (*ServerAddressByClientCIDR) ProtoMessage() {} + +func init() { + proto.RegisterType((*Cluster)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.Cluster") + proto.RegisterType((*ClusterCondition)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterCondition") + proto.RegisterType((*ClusterList)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterList") + proto.RegisterType((*ClusterSpec)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterSpec") + proto.RegisterType((*ClusterStatus)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterStatus") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ServerAddressByClientCIDR") +} +func (m *Cluster) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Cluster) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ClusterList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SecretRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n7, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ClusterStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + 
} + i += n + } + } + if len(m.Zones) > 0 { + for _, s := range m.Zones { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Region))) + i += copy(data[i:], m.Region) + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) + i += copy(data[i:], m.ClientCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) + i += copy(data[i:], m.ServerAddress) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterSpec) Size() (n int) { + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Zones) > 0 { + for _, s := range m.Zones { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Cluster) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ClusterConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 
6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Cluster{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ClusterCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zones = append(m.Zones, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto new file mode 100644 index 00000000..811f40f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto @@ -0,0 +1,114 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
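For orientation, the generated marshalling code above leans on two small helpers, encodeVarintGenerated and sovGenerated, which implement the standard protobuf base-128 varint (7 data bits per byte, high bit as a continuation flag). A minimal standalone sketch of that scheme, illustrative only and not part of the vendored files:

package main

import "fmt"

// putVarint mirrors encodeVarintGenerated: little-endian base-128,
// with the high bit set on every byte except the last.
func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// varintSize mirrors sovGenerated: the number of bytes putVarint will write.
func varintSize(v uint64) int {
	n := 0
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	fmt.Printf("300 -> % x (wrote %d bytes, predicted %d)\n", buf[:n], n, varintSize(300))
	// prints: 300 -> ac 02 (wrote 2 bytes, predicted 2)
}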
+ +syntax = 'proto2'; + +package k8s.io.kubernetes.federation.apis.federation.v1beta1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. +message Cluster { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Cluster. + optional ClusterSpec spec = 2; + + // Status describes the current status of a Cluster + optional ClusterStatus status = 3; +} + +// ClusterCondition describes current state of a cluster. +message ClusterCondition { + // Type of cluster condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + + // Last time the condition transit from one status to another. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition. + optional string message = 6; +} + +// A list of all the kubernetes clusters registered to the federation +message ClusterList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // List of Cluster objects. + repeated Cluster items = 2; +} + +// ClusterSpec describes the attributes of a kubernetes cluster. +message ClusterSpec { + // A map of client CIDR to server address. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; + + // Name of the secret containing kubeconfig to access this cluster. + // The secret is read from the kubernetes cluster that is hosting federation control plane. + // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". + // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. + // This can be left empty if the cluster allows insecure access. + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; +} + +// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. +message ClusterStatus { + // Conditions is an array of current cluster conditions. + repeated ClusterCondition conditions = 1; + + // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // These will always be in the same region. 
+ repeated string zones = 5; + + // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. + optional string region = 6; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go new file mode 100644 index 00000000..46dc3f10 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "federation" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Adds the list of known types to api.Scheme. +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Cluster{}, + &ClusterList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go new file mode 100644 index 00000000..f15988e2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// ClusterSpec describes the attributes of a kubernetes cluster. +type ClusterSpec struct { + // A map of client CIDR to server address. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` + // Name of the secret containing kubeconfig to access this cluster. + // The secret is read from the kubernetes cluster that is hosting federation control plane. + // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". + // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. + // This can be left empty if the cluster allows insecure access. + SecretRef *v1.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` +} + +type ClusterConditionType string + +// These are valid conditions of a cluster. +const ( + // ClusterReady means the cluster is ready to accept workloads. + ClusterReady ClusterConditionType = "Ready" + // ClusterOffline means the cluster is temporarily down or not reachable + ClusterOffline ClusterConditionType = "Offline" +) + +// ClusterCondition describes current state of a cluster. +type ClusterCondition struct { + // Type of cluster condition, Complete or Failed. + Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. 
+type ClusterStatus struct { + // Conditions is an array of current cluster conditions. + Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. + // These will always be in the same region. + Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` + // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. + Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` +} + +// +genclient=true,nonNamespaced=true + +// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. +type Cluster struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Cluster. + Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status describes the current status of a Cluster + Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A list of all the kubernetes clusters registered to the federation +type ClusterList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of Cluster objects. + Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go new file mode 100644 index 00000000..f8163ef8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go @@ -0,0 +1,107 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_internalclientset + +import ( + "github.com/golang/glog" + unversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned" + unversionedfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Federation() unversionedfederation.FederationInterface + Core() unversionedcore.CoreInterface +} + +// Clientset contains the clients for groups. 
Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *unversionedfederation.FederationClient + *unversionedcore.CoreClient +} + +// Federation retrieves the FederationClient +func (c *Clientset) Federation() unversionedfederation.FederationInterface { + if c == nil { + return nil + } + return c.FederationClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() unversionedcore.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.FederationClient, err = unversionedfederation.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.FederationClient = unversionedfederation.NewForConfigOrDie(c) + clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.FederationClient = unversionedfederation.New(c) + clientset.CoreClient = unversionedcore.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go new file mode 100644 index 00000000..40d4acce --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated clientset. +package federation_internalclientset diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go new file mode 100644 index 00000000..af8c2e74 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation_internalclientset + +// These imports are the API groups the client will support. +import ( + _ "k8s.io/kubernetes/federation/apis/federation/install" +) + +func init() { +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go new file mode 100644 index 00000000..d308d0fe --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ServicesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. 
+func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/api" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go new file mode 100644 index 00000000..30cff08b --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated typed clients. 
+package unversioned diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go new file mode 100644 index 00000000..65df6665 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type ServiceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go new file mode 100644 index 00000000..006f601c --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*api.Service) (*api.Service, error) + Update(*api.Service) (*api.Service, error) + UpdateStatus(*api.Service) (*api.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Service, error) + List(opts api.ListOptions) (*api.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. 
+func (c *services) Create(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { + result = &api.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go new file mode 100644 index 00000000..e2709514 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + federation "k8s.io/kubernetes/federation/apis/federation" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClustersGetter has a method to return a ClusterInterface. +// A group's client should implement this interface. +type ClustersGetter interface { + Clusters() ClusterInterface +} + +// ClusterInterface has methods to work with Cluster resources. +type ClusterInterface interface { + Create(*federation.Cluster) (*federation.Cluster, error) + Update(*federation.Cluster) (*federation.Cluster, error) + UpdateStatus(*federation.Cluster) (*federation.Cluster, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*federation.Cluster, error) + List(opts api.ListOptions) (*federation.ClusterList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterExpansion +} + +// clusters implements ClusterInterface +type clusters struct { + client *FederationClient +} + +// newClusters returns a Clusters +func newClusters(c *FederationClient) *clusters { + return &clusters{ + client: c, + } +} + +// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Create(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Post(). + Resource("clusters"). + Body(cluster). + Do(). + Into(result) + return +} + +// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Update(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + Body(cluster). + Do(). + Into(result) + return +} + +func (c *clusters) UpdateStatus(cluster *federation.Cluster) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + SubResource("status"). + Body(cluster). + Do(). + Into(result) + return +} + +// Delete takes name of the cluster and deletes it. Returns an error if one occurs. +func (c *clusters) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusters"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusters"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. +func (c *clusters) Get(name string) (result *federation.Cluster, err error) { + result = &federation.Cluster{} + err = c.client.Get(). + Resource("clusters"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Clusters that match those selectors. +func (c *clusters) List(opts api.ListOptions) (result *federation.ClusterList, err error) { + result = &federation.ClusterList{} + err = c.client.Get(). + Resource("clusters"). 
+ VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *clusters) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusters"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go new file mode 100644 index 00000000..30cff08b --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] + +// This package has the automatically generated typed clients. +package unversioned diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go new file mode 100644 index 00000000..be2a8a15 --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type FederationInterface interface { + GetRESTClient() *restclient.RESTClient + ClustersGetter +} + +// FederationClient is used to interact with features provided by the Federation group. +type FederationClient struct { + *restclient.RESTClient +} + +func (c *FederationClient) Clusters() ClusterInterface { + return newClusters(c) +} + +// NewForConfig creates a new FederationClient for the given config. 
+func NewForConfig(c *restclient.Config) (*FederationClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &FederationClient{client}, nil +} + +// NewForConfigOrDie creates a new FederationClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *FederationClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FederationClient for the given RESTClient. +func New(c *restclient.RESTClient) *FederationClient { + return &FederationClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if federation group is not registered, return an error + g, err := registered.Group("federation") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FederationClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go new file mode 100644 index 00000000..8888bf9b --- /dev/null +++ b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type ClusterExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go new file mode 100644 index 00000000..dbdf11cb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +const kubectlPrefix = "kubectl.kubernetes.io/" + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +const LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go new file mode 100644 index 00000000..6a422ea4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package annotations defines annotation keys that shared between server and client +package annotations diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/create.go b/vendor/k8s.io/kubernetes/pkg/api/rest/create.go new file mode 100644 index 00000000..fa95b7f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/create.go @@ -0,0 +1,126 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// RESTCreateStrategy defines the minimum validation, accepted input, and +// name generation behavior to create an object that follows Kubernetes +// API conventions. +type RESTCreateStrategy interface { + runtime.ObjectTyper + // The name generate is used when the standard GenerateName field is set. + // The NameGenerator will be invoked prior to validation. + api.NameGenerator + + // NamespaceScoped returns true if the object must be within a namespace. + NamespaceScoped() bool + // PrepareForCreate is invoked on create before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. 
This should not remove fields + // whose presence would be considered a validation error. + PrepareForCreate(obj runtime.Object) + // Validate is invoked after default fields in the object have been filled in before + // the object is persisted. This method should not mutate the object. + Validate(ctx api.Context, obj runtime.Object) field.ErrorList + // Canonicalize is invoked after validation has succeeded but before the + // object has been persisted. This method may mutate the object. + Canonicalize(obj runtime.Object) +} + +// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns +// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate. +// It returns nil if the object should be created. +func BeforeCreate(strategy RESTCreateStrategy, ctx api.Context, obj runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if strategy.NamespaceScoped() { + if !api.ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else { + objectMeta.Namespace = api.NamespaceNone + } + objectMeta.DeletionTimestamp = nil + objectMeta.DeletionGracePeriodSeconds = nil + strategy.PrepareForCreate(obj) + api.FillObjectMetaSystemFields(ctx, objectMeta) + api.GenerateName(strategy, objectMeta) + + if errs := strategy.Validate(ctx, obj); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) + } + + // Custom validation (including name validation) passed + // Now run common validation on object meta + // Do this *after* custom validation so that specific error messages are shown whenever possible + if errs := validation.ValidateObjectMeta(objectMeta, strategy.NamespaceScoped(), validation.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// CheckGeneratedNameError checks whether an error that occurred creating a resource is due +// to generation being unable to pick a valid name. +func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { + if !errors.IsAlreadyExists(err) { + return err + } + + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + + if len(objectMeta.GenerateName) == 0 { + return err + } + + return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) +} + +// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. +func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*api.ObjectMeta, unversioned.GroupVersionKind, error) { + objectMeta, err := api.ObjectMetaFor(obj) + if err != nil { + return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) + } + kinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) + } + return objectMeta, kinds[0], nil +} + +// NamespaceScopedStrategy has a method to tell if the object must be in a namespace. +type NamespaceScopedStrategy interface { + // NamespaceScoped returns if the object must be in a namespace. 
+ NamespaceScoped() bool +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go b/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go new file mode 100644 index 00000000..34965d52 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go @@ -0,0 +1,93 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes +// API conventions. +type RESTDeleteStrategy interface { + runtime.ObjectTyper +} + +// RESTGracefulDeleteStrategy must be implemented by the registry that supports +// graceful deletion. +type RESTGracefulDeleteStrategy interface { + // CheckGracefulDelete should return true if the object can be gracefully deleted and set + // any default values on the DeleteOptions. + CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool +} + +// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object +// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted +// (and the provided grace period is longer than the time to deletion), and an error is returned if the +// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with +// default values if graceful is true. +func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) { + objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return false, false, kerr + } + // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too. + if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID { + return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). 
The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID)) + } + gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy) + if !ok { + return false, false, nil + } + // if the object is already being deleted + if objectMeta.DeletionTimestamp != nil { + // if we are already being deleted, we may only shorten the deletion grace period + // this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set, + // so we force deletion immediately + if objectMeta.DeletionGracePeriodSeconds == nil { + return false, false, nil + } + // only a shorter grace period may be provided by a user + if options.GracePeriodSeconds != nil { + period := int64(*options.GracePeriodSeconds) + if period > *objectMeta.DeletionGracePeriodSeconds { + return false, true, nil + } + now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.DeletionTimestamp = &now + objectMeta.DeletionGracePeriodSeconds = &period + options.GracePeriodSeconds = &period + return true, false, nil + } + // graceful deletion is pending, do nothing + options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds + return false, true, nil + } + + if !gracefulStrategy.CheckGracefulDelete(obj, options) { + return false, false, nil + } + now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) + objectMeta.DeletionTimestamp = &now + objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds + return true, false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go new file mode 100644 index 00000000..8fed0e9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rest defines common logic around changes to Kubernetes resources. +package rest diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/export.go b/vendor/k8s.io/kubernetes/pkg/api/rest/export.go new file mode 100644 index 00000000..e12f65de --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/export.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +// RESTExportStrategy is the interface that defines how to export a Kubernetes object +type RESTExportStrategy interface { + // Export strips fields that can not be set by the user. If 'exact' is false + // fields specific to the cluster are also stripped + Export(obj runtime.Object, exact bool) error +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go b/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go new file mode 100644 index 00000000..4d5b5ba9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go @@ -0,0 +1,306 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "io" + "net/http" + "net/url" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +//TODO: +// Storage interfaces need to be separated into two groups; those that operate +// on collections and those that operate on individually named items. +// Collection interfaces: +// (Method: Current -> Proposed) +// GET: Lister -> CollectionGetter +// WATCH: Watcher -> CollectionWatcher +// CREATE: Creater -> CollectionCreater +// DELETE: (n/a) -> CollectionDeleter +// UPDATE: (n/a) -> CollectionUpdater +// +// Single item interfaces: +// (Method: Current -> Proposed) +// GET: Getter -> NamedGetter +// WATCH: (n/a) -> NamedWatcher +// CREATE: (n/a) -> NamedCreater +// DELETE: Deleter -> NamedDeleter +// UPDATE: Update -> NamedUpdater + +// Storage is a generic interface for RESTful storage services. +// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected +// that objects may implement any of the below interfaces. +type Storage interface { + // New returns an empty object that can be used with Create and Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object +} + +// KindProvider specifies a different kind for its API than for its internal storage. This is necessary for external +// objects that are not compiled into the api server. For such objects, there is no in-memory representation for +// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of +// API discovery we want to present the specific kind, not the generic internal representation. +type KindProvider interface { + Kind() string +} + +// Lister is an object that can retrieve resources that match the provided field and label criteria. +type Lister interface { + // NewList returns an empty object that can be used with the List call. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + NewList() runtime.Object + // List selects resources in the storage which match to the selector. 'options' can be nil. 
+ List(ctx api.Context, options *api.ListOptions) (runtime.Object, error) +} + +// Exporter is an object that knows how to strip a RESTful resource for export +type Exporter interface { + // Export an object. Fields that are not user specified (e.g. Status, ObjectMeta.ResourceVersion) are stripped out + // Returns the stripped object. If 'exact' is true, fields that are specific to the cluster (e.g. namespace) are + // retained, otherwise they are stripped also. + Export(ctx api.Context, name string, opts unversioned.ExportOptions) (runtime.Object, error) +} + +// Getter is an object that can retrieve a named RESTful resource. +type Getter interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + Get(ctx api.Context, name string) (runtime.Object, error) +} + +// GetterWithOptions is an object that retrieve a named RESTful resource and takes +// additional options on the get request. It allows a caller to also receive the +// subpath of the GET request. +type GetterWithOptions interface { + // Get finds a resource in the storage by name and returns it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // The options object passed to it is of the same type returned by the NewGetOptions + // method. + Get(ctx api.Context, name string, options runtime.Object) (runtime.Object, error) + + // NewGetOptions returns an empty options object that will be used to pass + // options to the Get method. It may return a bool and a string, if true, the + // value of the request path below the object will be included as the named + // string in the serialization of the runtime object. E.g., returning "path" + // will convert the trailing request scheme value to "path" in the map[string][]string + // passed to the converter. + NewGetOptions() (runtime.Object, bool, string) +} + +// Deleter is an object that can delete a named RESTful resource. +type Deleter interface { + // Delete finds a resource in the storage and deletes it. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // Delete *may* return the object that was deleted, or a status object indicating additional + // information about deletion. + Delete(ctx api.Context, name string) (runtime.Object, error) +} + +// GracefulDeleter knows how to pass deletion options to allow delayed deletion of a +// RESTful object. +type GracefulDeleter interface { + // Delete finds a resource in the storage and deletes it. + // If options are provided, the resource will attempt to honor them or return an invalid + // request error. + // Although it can return an arbitrary error value, IsNotFound(err) is true for the + // returned error value err when the specified resource is not found. + // Delete *may* return the object that was deleted, or a status object indicating additional + // information about deletion. 
+ Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) +} + +// GracefulDeleteAdapter adapts the Deleter interface to GracefulDeleter +type GracefulDeleteAdapter struct { + Deleter +} + +// Delete implements RESTGracefulDeleter in terms of Deleter +func (w GracefulDeleteAdapter) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) { + return w.Deleter.Delete(ctx, name) +} + +// CollectionDeleter is an object that can delete a collection +// of RESTful resources. +type CollectionDeleter interface { + // DeleteCollection selects all resources in the storage matching given 'listOptions' + // and deletes them. If 'options' are provided, the resource will attempt to honor + // them or return an invalid request error. + // DeleteCollection may not be atomic - i.e. it may delete some objects and still + // return an error after it. On success, returns a list of deleted objects. + DeleteCollection(ctx api.Context, options *api.DeleteOptions, listOptions *api.ListOptions) (runtime.Object, error) +} + +// Creater is an object that can create an instance of a RESTful object. +type Creater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. + Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) +} + +// NamedCreater is an object that can create an instance of a RESTful object using a name parameter. +type NamedCreater interface { + // New returns an empty object that can be used with Create after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Create creates a new version of a resource. It expects a name parameter from the path. + // This is needed for create operations on subresources which include the name of the parent + // resource in the path. + Create(ctx api.Context, name string, obj runtime.Object) (runtime.Object, error) +} + +// UpdatedObjectInfo provides information about an updated object to an Updater. +// It requires access to the old object in order to return the newly updated object. +type UpdatedObjectInfo interface { + // Returns preconditions built from the updated object, if applicable. + // May return nil, or a preconditions object containing nil fields, + // if no preconditions can be determined from the updated object. + Preconditions() *api.Preconditions + + // UpdatedObject returns the updated object, given a context and old object. + // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). + UpdatedObject(ctx api.Context, oldObj runtime.Object) (newObj runtime.Object, err error) +} + +// Updater is an object that can update an instance of a RESTful object. +type Updater interface { + // New returns an empty object that can be used with Update after request data has been put into it. + // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) + New() runtime.Object + + // Update finds a resource in the storage and updates it. Some implementations + // may allow updates creates the object - they should set the created boolean + // to true. 
+ Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) +} + +// CreaterUpdater is a storage object that must support both create and update. +// Go prevents embedded interfaces that implement the same method. +type CreaterUpdater interface { + Creater + Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) +} + +// CreaterUpdater must satisfy the Updater interface. +var _ Updater = CreaterUpdater(nil) + +// Patcher is a storage object that supports both get and update. +type Patcher interface { + Getter + Updater +} + +// Watcher should be implemented by all Storage objects that +// want to offer the ability to watch for changes through the watch api. +type Watcher interface { + // 'label' selects on labels; 'field' selects on the object's fields. Not all fields + // are supported; an error should be returned if 'field' tries to select on a field that + // isn't supported. 'resourceVersion' allows for continuing/starting a watch at a + // particular version. + Watch(ctx api.Context, options *api.ListOptions) (watch.Interface, error) +} + +// StandardStorage is an interface covering the common verbs. Provided for testing whether a +// resource satisfies the normal storage methods. Use Storage when passing opaque storage objects. +type StandardStorage interface { + Getter + Lister + CreaterUpdater + GracefulDeleter + CollectionDeleter + Watcher +} + +// Redirector know how to return a remote resource's location. +type Redirector interface { + // ResourceLocation should return the remote location of the given resource, and an optional transport to use to request it, or an error. + ResourceLocation(ctx api.Context, id string) (remoteLocation *url.URL, transport http.RoundTripper, err error) +} + +// Responder abstracts the normal response behavior for a REST method and is passed to callers that +// may wish to handle the response directly in some cases, but delegate to the normal error or object +// behavior in other cases. +type Responder interface { + // Object writes the provided object to the response. Invoking this method multiple times is undefined. + Object(statusCode int, obj runtime.Object) + // Error writes the provided error to the response. This method may only be invoked once. + Error(err error) +} + +// Connecter is a storage object that responds to a connection request. +type Connecter interface { + // Connect returns an http.Handler that will handle the request/response for a given API invocation. + // The provided responder may be used for common API responses. The responder will write both status + // code and body, so the ServeHTTP method should exit after invoking the responder. The Handler will + // be used for a single API request and then discarded. The Responder is guaranteed to write to the + // same http.ResponseWriter passed to ServeHTTP. + Connect(ctx api.Context, id string, options runtime.Object, r Responder) (http.Handler, error) + + // NewConnectOptions returns an empty options object that will be used to pass + // options to the Connect method. If nil, then a nil options object is passed to + // Connect. It may return a bool and a string. If true, the value of the request + // path below the object will be included as the named string in the serialization + // of the runtime object. 
+ NewConnectOptions() (runtime.Object, bool, string) + + // ConnectMethods returns the list of HTTP methods handled by Connect + ConnectMethods() []string +} + +// ResourceStreamer is an interface implemented by objects that prefer to be streamed from the server +// instead of decoded directly. +type ResourceStreamer interface { + // InputStream should return an io.ReadCloser if the provided object supports streaming. The desired + // api version and a accept header (may be empty) are passed to the call. If no error occurs, + // the caller may return a flag indicating whether the result should be flushed as writes occur + // and a content type string that indicates the type of the stream. + // If a null stream is returned, a StatusNoContent response wil be generated. + InputStream(apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, mimeType string, err error) +} + +// StorageMetadata is an optional interface that callers can implement to provide additional +// information about their Storage objects. +type StorageMetadata interface { + // ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE, + // PATCH) can respond with. + ProducesMIMETypes(verb string) []string +} + +// ConnectRequest is an object passed to admission control for Connect operations +type ConnectRequest struct { + // Name is the name of the object on which the connect request was made + Name string + + // Options is the options object passed to the connect request. See the NewConnectOptions method on Connecter + Options runtime.Object + + // ResourcePath is the path for the resource in the REST server (ie. "pods/proxy") + ResourcePath string +} + +func (obj *ConnectRequest) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/types.go b/vendor/k8s.io/kubernetes/pkg/api/rest/types.go new file mode 100644 index 00000000..0e7f048b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +// ObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may transform the provided +// object. +type ObjectFunc func(obj runtime.Object) error + +// AllFuncs returns an ObjectFunc that attempts to run all of the provided functions +// in order, returning early if there are any errors. 
+func AllFuncs(fns ...ObjectFunc) ObjectFunc { + return func(obj runtime.Object) error { + for _, fn := range fns { + if fn == nil { + continue + } + if err := fn(obj); err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/update.go b/vendor/k8s.io/kubernetes/pkg/api/rest/update.go new file mode 100644 index 00000000..bc5ed0c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/update.go @@ -0,0 +1,175 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// RESTUpdateStrategy defines the minimum validation, accepted input, and +// name generation behavior to update an object that follows Kubernetes +// API conventions. A resource may have many UpdateStrategies, depending on +// the call pattern in use. +type RESTUpdateStrategy interface { + runtime.ObjectTyper + // NamespaceScoped returns true if the object must be within a namespace. + NamespaceScoped() bool + // AllowCreateOnUpdate returns true if the object can be created by a PUT. + AllowCreateOnUpdate() bool + // PrepareForUpdate is invoked on update before validation to normalize + // the object. For example: remove fields that are not to be persisted, + // sort order-insensitive list fields, etc. This should not remove fields + // whose presence would be considered a validation error. + PrepareForUpdate(obj, old runtime.Object) + // ValidateUpdate is invoked after default fields in the object have been + // filled in before the object is persisted. This method should not mutate + // the object. + ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList + // Canonicalize is invoked after validation has succeeded but before the + // object has been persisted. This method may mutate the object. + Canonicalize(obj runtime.Object) + // AllowUnconditionalUpdate returns true if the object can be updated + // unconditionally (irrespective of the latest resource version), when + // there is no resource version specified in the object. + AllowUnconditionalUpdate() bool +} + +// TODO: add other common fields that require global validation. +func validateCommonFields(obj, old runtime.Object) (field.ErrorList, error) { + allErrs := field.ErrorList{} + objectMeta, err := api.ObjectMetaFor(obj) + if err != nil { + return nil, fmt.Errorf("failed to get new object metadata: %v", err) + } + oldObjectMeta, err := api.ObjectMetaFor(old) + if err != nil { + return nil, fmt.Errorf("failed to get old object metadata: %v", err) + } + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...) + + return allErrs, nil +} + +// BeforeUpdate ensures that common operations for all resources are performed on update. 
It only returns +// errors that can be converted to api.Status. It will invoke update validation with the provided existing +// and updated objects. +func BeforeUpdate(strategy RESTUpdateStrategy, ctx api.Context, obj, old runtime.Object) error { + objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) + if kerr != nil { + return kerr + } + if strategy.NamespaceScoped() { + if !api.ValidNamespace(ctx, objectMeta) { + return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") + } + } else { + objectMeta.Namespace = api.NamespaceNone + } + + strategy.PrepareForUpdate(obj, old) + + // Ensure some common fields, like UID, are validated for all resources. + errs, err := validateCommonFields(obj, old) + if err != nil { + return errors.NewInternalError(err) + } + + errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...) + if len(errs) > 0 { + return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) + } + + strategy.Canonicalize(obj) + + return nil +} + +// TransformFunc is a function to transform and return newObj +type TransformFunc func(ctx api.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) + +// defaultUpdatedObjectInfo implements UpdatedObjectInfo +type defaultUpdatedObjectInfo struct { + // obj is the updated object + obj runtime.Object + + // copier makes a copy of the object before returning it. + // this allows repeated calls to UpdatedObject() to return + // pristine data, even if the returned value is mutated. + copier runtime.ObjectCopier + + // transformers is an optional list of transforming functions that modify or + // replace obj using information from the context, old object, or other sources. + transformers []TransformFunc +} + +// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. +func DefaultUpdatedObjectInfo(obj runtime.Object, copier runtime.ObjectCopier, transformers ...TransformFunc) UpdatedObjectInfo { + return &defaultUpdatedObjectInfo{obj, copier, transformers} +} + +// Preconditions satisfies the UpdatedObjectInfo interface. +func (i *defaultUpdatedObjectInfo) Preconditions() *api.Preconditions { + // Attempt to get the UID out of the object + accessor, err := meta.Accessor(i.obj) + if err != nil { + // If no UID can be read, no preconditions are possible + return nil + } + + // If empty, no preconditions needed + uid := accessor.GetUID() + if len(uid) == 0 { + return nil + } + + return &api.Preconditions{UID: &uid} +} + +// UpdatedObject satisfies the UpdatedObjectInfo interface. +// It returns a copy of the held obj, passed through any configured transformers. +func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx api.Context, oldObj runtime.Object) (runtime.Object, error) { + var err error + // Start with the configured object + newObj := i.obj + + // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, + // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. + // If we're re-called, we need to be able to return the pristine version. 
+ if newObj != nil { + newObj, err = i.copier.Copy(newObj) + if err != nil { + return nil, err + } + } + + // Allow any configured transformers to update the new object + for _, transformer := range i.transformers { + newObj, err = transformer(ctx, newObj, oldObj) + if err != nil { + return nil, err + } + } + + return newObj, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go new file mode 100644 index 00000000..5a2135c6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go @@ -0,0 +1,117 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package apps + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_apps_PetSet, + DeepCopy_apps_PetSetList, + DeepCopy_apps_PetSetSpec, + DeepCopy_apps_PetSetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_apps_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_apps_PetSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_apps_PetSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_apps_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PetSet, len(in)) + for i := range in { + if err := DeepCopy_apps_PetSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_apps_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(in)) + for i := range in { + if err := 
api.DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func DeepCopy_apps_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + out.Replicas = in.Replicas + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go new file mode 100644 index 00000000..b4d9011d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the apps API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/apps" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", apps.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) 
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(apps.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + apps.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go new file mode 100644 index 00000000..dd5b9a90 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. 
+ addKnownTypes(scheme) +} + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PetSet{}, + &PetSetList{}, + &api.ListOptions{}, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go new file mode 100644 index 00000000..87019c45 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -0,0 +1,94 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha and +// and subject to change without notice. +type PetSet struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired identities of pets in this set. + Spec PetSetSpec `json:"spec,omitempty"` + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + Status PetSetStatus `json:"status,omitempty"` +} + +// A PetSetSpec is the specification of a PetSet. +type PetSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + Replicas int `json:"replicas,omitempty"` + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + Selector *unversioned.LabelSelector `json:"selector,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. 
Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + Template api.PodTemplateSpec `json:"template"` + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller. + ServiceName string `json:"serviceName"` +} + +// PetSetStatus represents the current state of a PetSet. +type PetSetStatus struct { + // most recent generation observed by this autoscaler. + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + // Replicas is the number of actual replicas. + Replicas int `json:"replicas"` +} + +// PetSetList is a collection of PetSets. +type PetSetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + Items []PetSet `json:"items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go new file mode 100644 index 00000000..48f1f2b4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions to handle the *int32 -> int + // conversion. A pointer is useful in the versioned type so we can default + // it, but a plain int32 is more convenient in the internal type. These + // functions are the same as the autogenerated ones in every other way. + err := scheme.AddConversionFuncs( + Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, + Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. 
+ panic(err) + } + + err = api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "PetSet", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.PetSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = int(*in.Replicas) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(in *apps.PetSetSpec, out *PetSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go new file mode 100644 index 00000000..cfd6ce44 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go @@ -0,0 +1,156 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + apps "k8s.io/kubernetes/pkg/apis/apps" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PetSet_To_apps_PetSet, + Convert_apps_PetSet_To_v1alpha1_PetSet, + Convert_v1alpha1_PetSetList_To_apps_PetSetList, + Convert_apps_PetSetList_To_v1alpha1_PetSetList, + Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, + Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, + Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus, + Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { + SetDefaults_PetSet(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSet_To_apps_PetSet(in, out, s) +} + +func autoConvert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { + return autoConvert_apps_PetSet_To_v1alpha1_PetSet(in, out, s) +} + +func autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.PetSet, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PetSet_To_apps_PetSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in, out, s) +} + +func autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PetSet, len(*in)) + for i := range *in { + if err := Convert_apps_PetSet_To_v1alpha1_PetSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { + return autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in, out, s) +} + +func autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = int(in.Replicas) + return nil +} + +func Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in, out, s) +} + +func autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = int32(in.Replicas) + return nil +} + +func Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { + return autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go new file mode 100644 index 00000000..6e51cacb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_PetSet, + DeepCopy_v1alpha1_PetSetList, + DeepCopy_v1alpha1_PetSetSpec, + DeepCopy_v1alpha1_PetSetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1alpha1_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PetSetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PetSetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PetSet, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PetSet(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { + if in.Replicas != nil { + in, out := in.Replicas, &out.Replicas + *out = new(int32) + **out = *in + } else { + out.Replicas = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(in)) + for i := range in { + if err := v1.DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func DeepCopy_v1alpha1_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + out.Replicas = in.Replicas + return nil +} diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go new file mode 100644 index 00000000..e4102813 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_PetSet, + ) +} + +func SetDefaults_PetSet(obj *PetSet) { + labels := obj.Spec.Template.Labels + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &unversioned.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go new file mode 100644 index 00000000..65a03a20 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go new file mode 100644 index 00000000..88f1bcd4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go @@ -0,0 +1,969 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto + + It has these top-level messages: + PetSet + PetSetList + PetSetSpec + PetSetStatus +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *PetSet) Reset() { *m = PetSet{} } +func (m *PetSet) String() string { return proto.CompactTextString(m) } +func (*PetSet) ProtoMessage() {} + +func (m *PetSetList) Reset() { *m = PetSetList{} } +func (m *PetSetList) String() string { return proto.CompactTextString(m) } +func (*PetSetList) ProtoMessage() {} + +func (m *PetSetSpec) Reset() { *m = PetSetSpec{} } +func (m *PetSetSpec) String() string { return proto.CompactTextString(m) } +func (*PetSetSpec) ProtoMessage() {} + +func (m *PetSetStatus) Reset() { *m = PetSetStatus{} } +func (m *PetSetStatus) String() string { return proto.CompactTextString(m) } +func (*PetSetStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*PetSet)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSet") + proto.RegisterType((*PetSetList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetList") + proto.RegisterType((*PetSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetSpec") + proto.RegisterType((*PetSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetStatus") +} +func (m *PetSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *PetSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PetSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n5, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n6, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + return i, nil +} + +func (m *PetSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PetSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PetSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PetSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PetSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PetSetStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} 
+func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PetSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
PetSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PetSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PetSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PetSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PetSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto new file mode 100644 index 00000000..ffb6e2ed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.apps.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha +// and subject to change without notice. +message PetSet { + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pets in this set. + optional PetSetSpec spec = 2; + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + optional PetSetStatus status = 3; +} + +// PetSetList is a collection of PetSets. +message PetSetList { + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + repeated PetSet items = 2; +} + +// A PetSetSpec is the specification of a PetSet. +message PetSetSpec { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + optional int32 replicas = 1; + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller. + optional string serviceName = 5; +} + +// PetSetStatus represents the current state of a PetSet. +message PetSetStatus { + // most recent generation observed by this autoscaler. + optional int64 observedGeneration = 1; + + // Replicas is the number of actual replicas. 
+ optional int32 replicas = 2; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go new file mode 100644 index 00000000..9ab37dfb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &PetSet{}, + &PetSetList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} + +func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go new file mode 100644 index 00000000..fb0aa48a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go @@ -0,0 +1,94 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// PetSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The PetSet guarantees that a given network identity will always +// map to the same storage identity. PetSet is currently in alpha +// and subject to change without notice. +type PetSet struct { + unversioned.TypeMeta `json:",inline"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pets in this set. 
+ Spec PetSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pets in this PetSet. This data + // may be out of date by some window of time. + Status PetSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A PetSetSpec is the specification of a PetSet. +type PetSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the PetSet + // will fulfill this Template, but have a unique identity from the rest + // of the PetSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // VolumeClaimTemplates is a list of claims that pets are allowed to reference. + // The PetSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pet. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // ServiceName is the name of the service that governs this PetSet. + // This service must exist before the PetSet, and is responsible for + // the network identity of the set. Pets get DNS/hostnames that follow the + // pattern: pet-specific-string.serviceName.default.svc.cluster.local + // where "pet-specific-string" is managed by the PetSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` +} + +// PetSetStatus represents the current state of a PetSet. +type PetSetStatus struct { + // most recent generation observed by this autoscaler. + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Replicas is the number of actual replicas. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` +} + +// PetSetList is a collection of PetSets. 
+type PetSetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PetSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..8ce682b4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PetSet = map[string]string{ + "": "PetSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe PetSet guarantees that a given network identity will always map to the same storage identity. PetSet is currently in alpha and subject to change without notice.", + "spec": "Spec defines the desired identities of pets in this set.", + "status": "Status is the current status of Pets in this PetSet. This data may be out of date by some window of time.", +} + +func (PetSet) SwaggerDoc() map[string]string { + return map_PetSet +} + +var map_PetSetList = map[string]string{ + "": "PetSetList is a collection of PetSets.", +} + +func (PetSetList) SwaggerDoc() map[string]string { + return map_PetSetList +} + +var map_PetSetSpec = map[string]string{ + "": "A PetSetSpec is the specification of a PetSet.", + "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the PetSet will fulfill this Template, but have a unique identity from the rest of the PetSet.", + "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pets are allowed to reference. 
The PetSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pet. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "ServiceName is the name of the service that governs this PetSet. This service must exist before the PetSet, and is responsible for the network identity of the set. Pets get DNS/hostnames that follow the pattern: pet-specific-string.serviceName.default.svc.cluster.local where \"pet-specific-string\" is managed by the PetSet controller.", +} + +func (PetSetSpec) SwaggerDoc() map[string]string { + return map_PetSetSpec +} + +var map_PetSetStatus = map[string]string{ + "": "PetSetStatus represents the current state of a PetSet.", + "observedGeneration": "most recent generation observed by this autoscaler.", + "replicas": "Replicas is the number of actual replicas.", +} + +func (PetSetStatus) SwaggerDoc() map[string]string { + return map_PetSetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go new file mode 100644 index 00000000..75ac7281 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go @@ -0,0 +1,91 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authentication + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_authenticationk8sio_TokenReview, + DeepCopy_authenticationk8sio_TokenReviewSpec, + DeepCopy_authenticationk8sio_TokenReviewStatus, + DeepCopy_authenticationk8sio_UserInfo, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_authenticationk8sio_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authenticationk8sio_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authenticationk8sio_TokenReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authenticationk8sio_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { + out.Token = in.Token + return nil +} + +func DeepCopy_authenticationk8sio_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { + out.Authenticated = in.Authenticated + if err := DeepCopy_authenticationk8sio_UserInfo(in.User, &out.User, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authenticationk8sio_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { + out.Username = in.Username + out.UID = in.UID + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go new file mode 100644 index 00000000..29447d21 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go @@ -0,0 +1,123 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", authentication.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + authentication.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1beta1.SchemeGroupVersion: + v1beta1.AddToScheme(api.Scheme) + } + } +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + rootScoped := sets.NewString("TokenReview") + ignoredKinds := sets.NewString() + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1beta1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(authentication.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go new file mode 100644 index 00000000..4dda3140 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) +} + +func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go new file mode 100644 index 00000000..02ec0d2b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// TokenReview attempts to authenticate a token to a known user. +type TokenReview struct { + unversioned.TypeMeta + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool + // User is the UserInfo associated with the provided token. 
+ User UserInfo +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string + // The names of groups this user is a part of. + Groups []string + // Any additional information provided by the authenticator. + Extra map[string][]string +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go new file mode 100644 index 00000000..6a8545d1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs() + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go new file mode 100644 index 00000000..9972f82e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go @@ -0,0 +1,143 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
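The register.go file above defines the Kind and Resource helpers that qualify bare names with the authentication.k8s.io group. A minimal sketch of their use, assuming this vendored tree is on the import path; the explicit import alias is only there because the directory name contains a dot.

package main

import (
	"fmt"

	authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
)

func main() {
	// Kind returns a group-qualified GroupKind, e.g. "TokenReview.authentication.k8s.io".
	fmt.Println(authentication.Kind("TokenReview"))
	// Resource returns a group-qualified GroupResource, e.g. "tokenreviews.authentication.k8s.io".
	fmt.Println(authentication.Resource("tokenreviews"))
}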
+ +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + authentication_k8s_io "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview, + Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview, + Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec, + Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec, + Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus, + Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus, + Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo, + Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return 
autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in, out, s) +} + +func autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + return nil +} + +func Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { + return autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in, out, s) +} + +func autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go new file mode 100644 index 00000000..e44dfc86 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go @@ -0,0 +1,91 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
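The generated conversion functions above are rarely called directly; they register themselves with api.Scheme in init, and callers go through the scheme. Below is a sketch of a round trip between the v1beta1 and internal TokenReview, assuming the Scheme.Convert(in, out) signature of this Kubernetes snapshot.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	authentication "k8s.io/kubernetes/pkg/apis/authentication.k8s.io"
	_ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install" // registers the group and its versions
	"k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
)

func main() {
	external := &v1beta1.TokenReview{
		Spec: v1beta1.TokenReviewSpec{Token: "opaque-bearer-token"},
	}

	// Convert the versioned object to the internal representation; the scheme
	// dispatches to Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview.
	internal := &authentication.TokenReview{}
	if err := api.Scheme.Convert(external, internal); err != nil {
		panic(err)
	}
	fmt.Println(internal.Spec.Token)

	// And back again to v1beta1.
	roundTripped := &v1beta1.TokenReview{}
	if err := api.Scheme.Convert(internal, roundTripped); err != nil {
		panic(err)
	}
	fmt.Println(roundTripped.Spec.Token)
}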
+ +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_TokenReview, + DeepCopy_v1beta1_TokenReviewSpec, + DeepCopy_v1beta1_TokenReviewStatus, + DeepCopy_v1beta1_UserInfo, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1beta1_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_TokenReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { + out.Token = in.Token + return nil +} + +func DeepCopy_v1beta1_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { + out.Authenticated = in.Authenticated + if err := DeepCopy_v1beta1_UserInfo(in.User, &out.User, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { + out.Username = in.Username + out.UID = in.UID + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go new file mode 100644 index 00000000..0f3732e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go new file mode 100644 index 00000000..cfdb87c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go new file mode 100644 index 00000000..e183299c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) +} + +func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go new file mode 100644 index 00000000..fc136877 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
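AddToScheme above wires the known types, defaulting funcs, and conversion funcs into whichever scheme it is handed, so the group can also be registered into a scheme other than the global api.Scheme. A small sketch, assuming runtime.NewScheme and Scheme.Recognizes behave as in upstream Kubernetes of this vintage:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Build a private scheme and register the group's v1beta1 types into it.
	scheme := runtime.NewScheme()
	v1beta1.AddToScheme(scheme)

	// The scheme now recognizes TokenReview under authentication.k8s.io/v1beta1.
	gvk := v1beta1.SchemeGroupVersion.WithKind("TokenReview")
	fmt.Println(scheme.Recognizes(gvk))
}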
+type TokenReview struct { + unversioned.TypeMeta `json:",inline"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus `json:"status,omitempty"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string `json:"token,omitempty"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool `json:"authenticated,omitempty"` + // User is the UserInfo associated with the provided token. + User UserInfo `json:"user,omitempty"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string `json:"username,omitempty"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string `json:"uid,omitempty"` + // The names of groups this user is a part of. + Groups []string `json:"groups,omitempty"` + // Any additional information provided by the authenticator. + Extra map[string][]string `json:"extra,omitempty"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go new file mode 100644 index 00000000..bc40fb33 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go @@ -0,0 +1,170 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authorization + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_authorization_LocalSubjectAccessReview, + DeepCopy_authorization_NonResourceAttributes, + DeepCopy_authorization_ResourceAttributes, + DeepCopy_authorization_SelfSubjectAccessReview, + DeepCopy_authorization_SelfSubjectAccessReviewSpec, + DeepCopy_authorization_SubjectAccessReview, + DeepCopy_authorization_SubjectAccessReviewSpec, + DeepCopy_authorization_SubjectAccessReviewStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
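Because the v1beta1 types above carry json tags, the wire form of a TokenReview sent to (or cached by) the webhook token authenticator can be previewed with plain encoding/json. The expected output in the comment is approximate, and the TypeMeta values are filled in by hand purely for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1"
)

func main() {
	review := v1beta1.TokenReview{
		TypeMeta: unversioned.TypeMeta{
			Kind:       "TokenReview",
			APIVersion: "authentication.k8s.io/v1beta1",
		},
		Spec: v1beta1.TokenReviewSpec{Token: "opaque-bearer-token"},
	}

	body, err := json.MarshalIndent(review, "", "  ")
	if err != nil {
		panic(err)
	}
	// Roughly: {"kind":"TokenReview","apiVersion":"authentication.k8s.io/v1beta1",
	//           "spec":{"token":"opaque-bearer-token"},"status":{"user":{}}}
	fmt.Println(string(body))
}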
+ panic(err) + } +} + +func DeepCopy_authorization_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func DeepCopy_authorization_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func DeepCopy_authorization_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + if in.Groups != nil { + 
in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} + +func DeepCopy_authorization_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go new file mode 100644 index 00000000..bf8814dd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go @@ -0,0 +1,128 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/authorization" + "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/authorization" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", authorization.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) 
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + authorization.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1beta1.SchemeGroupVersion: + v1beta1.AddToScheme(api.Scheme) + } + } +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + worstToBestGroupVersions := []unversioned.GroupVersion{} + for i := len(externalVersions) - 1; i >= 0; i-- { + worstToBestGroupVersions = append(worstToBestGroupVersions, externalVersions[i]) + } + + rootScoped := sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview") + ignoredKinds := sets.NewString() + return api.NewDefaultRESTMapper(worstToBestGroupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1beta1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(authorization.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go new file mode 100644 index 00000000..fdb6c4f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
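The install package above is consumed purely for its side effects: importing it registers the authorization group's versions and kinds. A sketch of that pattern, assuming registered.IsEnabledVersion and Scheme.Recognizes behave as they are used elsewhere in this snapshot:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
	_ "k8s.io/kubernetes/pkg/apis/authorization/install" // registers authorization/v1beta1 as a side effect
	"k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
)

func main() {
	// After the blank import, the external version is enabled in the registered package...
	fmt.Println(registered.IsEnabledVersion(v1beta1.SchemeGroupVersion))

	// ...and the global scheme recognizes the group's kinds.
	gvk := v1beta1.SchemeGroupVersion.WithKind("SubjectAccessReview")
	fmt.Println(api.Scheme.Recognizes(gvk))
}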
+*/ + +package authorization + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go new file mode 100644 index 00000000..8cfdfbe9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorization + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a +// spec.namespace means "in all namespaces". +type SubjectAccessReview struct { + unversioned.TypeMeta + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + unversioned.TypeMeta + + // Spec holds information about the request being evaluated. + Spec SelfSubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + unversioned.TypeMeta + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. 
+ Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + Namespace string + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verb string + // Group is the API Group of the Resource. "*" means all. + Group string + // Version is the API Version of the Resource. "*" means all. + Version string + // Resource is one of the existing resource types. "*" means all. + Resource string + // Subresource is one of the existing resource types. "" means none. + Subresource string + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + Name string +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + Path string + // Verb is the standard HTTP verb + Verb string +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + User string + // Groups is the groups you're testing for. + Groups []string + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string][]string +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool + // Reason is optional. It indicates why a request was allowed or denied. + Reason string +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go new file mode 100644 index 00000000..0b45ed5f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go @@ -0,0 +1,30 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs() + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go new file mode 100644 index 00000000..a475c0fd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go @@ -0,0 +1,333 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
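A sketch of how the internal authorization types above are typically filled in to phrase an access check ("can user jane list pods in the default namespace?"). Exactly one of ResourceAttributes and NonResourceAttributes is set, and Status is left for the server to fill in.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/authorization"
)

func main() {
	review := authorization.SubjectAccessReview{
		Spec: authorization.SubjectAccessReviewSpec{
			User:   "jane",
			Groups: []string{"developers"},
			ResourceAttributes: &authorization.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Group:     "", // core API group
				Version:   "v1",
				Resource:  "pods",
			},
		},
	}

	// Status is filled in by the server; a caller only inspects it afterwards.
	fmt.Printf("allowed=%v reason=%q\n", review.Status.Allowed, review.Status.Reason)
}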
+ +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + authorization "k8s.io/kubernetes/pkg/apis/authorization" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, + Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, + Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, + Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, + Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, + Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return 
autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out 
*SelfSubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(authorization.ResourceAttributes) + if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(authorization.NonResourceAttributes) + if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := 
Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(authorization.ResourceAttributes) + if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(authorization.NonResourceAttributes) + if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + 
out.User = in.User + out.Groups = in.Groups + out.Extra = in.Extra + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} + +func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go new file mode 100644 index 00000000..94a35650 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go @@ -0,0 +1,170 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1beta1_LocalSubjectAccessReview, + DeepCopy_v1beta1_NonResourceAttributes, + DeepCopy_v1beta1_ResourceAttributes, + DeepCopy_v1beta1_SelfSubjectAccessReview, + DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, + DeepCopy_v1beta1_SubjectAccessReview, + DeepCopy_v1beta1_SubjectAccessReviewSpec, + DeepCopy_v1beta1_SubjectAccessReviewStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
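The deep-copy functions for the authorization specs (above, and again in the v1beta1 file that follows) handle pointer fields by allocating a fresh target whenever the source pointer is non-nil, so the copy never aliases the original. A standalone sketch of that branch, using local stand-in types rather than the vendored ones:

package main

import "fmt"

type resourceAttributes struct {
	Namespace string
	Verb      string
	Resource  string
}

type spec struct {
	ResourceAttributes *resourceAttributes
}

// deepCopySpec mirrors the generated pointer handling: nil stays nil,
// non-nil gets a freshly allocated copy.
func deepCopySpec(in spec, out *spec) {
	if in.ResourceAttributes != nil {
		out.ResourceAttributes = new(resourceAttributes)
		*out.ResourceAttributes = *in.ResourceAttributes
	} else {
		out.ResourceAttributes = nil
	}
}

func main() {
	original := spec{ResourceAttributes: &resourceAttributes{Namespace: "default", Verb: "get", Resource: "pods"}}
	var clone spec
	deepCopySpec(original, &clone)

	// Mutating the original pointer target does not affect the clone.
	original.ResourceAttributes.Verb = "delete"
	fmt.Println(clone.ResourceAttributes.Verb) // still "get"
}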
+ panic(err) + } +} + +func DeepCopy_v1beta1_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func DeepCopy_v1beta1_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func DeepCopy_v1beta1_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { + if in.ResourceAttributes != nil { + in, out := in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.ResourceAttributes = nil + } + if in.NonResourceAttributes != nil { + in, out := in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { + return err + } + } else { + out.NonResourceAttributes = nil + } + out.User = in.User + if in.Groups != nil { + in, out := in.Groups, &out.Groups + *out = make([]string, len(in)) + copy(*out, in) + } else { + 
out.Groups = nil + } + if in.Extra != nil { + in, out := in.Extra, &out.Extra + *out = make(map[string][]string) + for key, val := range in { + if newVal, err := c.DeepCopy(val); err != nil { + return err + } else { + (*out)[key] = newVal.([]string) + } + } + } else { + out.Extra = nil + } + return nil +} + +func DeepCopy_v1beta1_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go new file mode 100644 index 00000000..340f8075 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go new file mode 100644 index 00000000..cfdb87c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go new file mode 100644 index 00000000..d9e33ed5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) +} + +func (obj *LocalSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go new file mode 100644 index 00000000..27078e9f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + unversioned.TypeMeta `json:",inline"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus `json:"status,omitempty"` +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + unversioned.TypeMeta `json:",inline"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus `json:"status,omitempty"` +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + unversioned.TypeMeta `json:",inline"` + + // Spec holds information about the request being evaluated. 
spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus `json:"status,omitempty"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + Namespace string `json:"namespace,omitempty"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verb string `json:"verb,omitempty"` + // Group is the API Group of the Resource. "*" means all. + Group string `json:"group,omitempty"` + // Version is the API Version of the Resource. "*" means all. + Version string `json:"version,omitempty"` + // Resource is one of the existing resource types. "*" means all. + Resource string `json:"resource,omitempty"` + // Subresource is one of the existing resource types. "" means none. + Subresource string `json:"subresource,omitempty"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + Name string `json:"name,omitempty"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + Path string `json:"path,omitempty"` + // Verb is the standard HTTP verb + Verb string `json:"verb,omitempty"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"` + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"` + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + User string `json:"user,omitempty"` + // Groups is the groups you're testing for. + Groups []string `json:"group,omitempty"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string][]string `json:"extra,omitempty"` +} + +// SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"` + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + Reason string `json:"reason,omitempty"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..d4c337db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", + "group": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go new file mode 100644 index 00000000..6e226a06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/autoscaling" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", autoscaling.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(autoscaling.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + autoscaling.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1.SchemeGroupVersion: + v1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go new file mode 100644 index 00000000..11ca6a05 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go @@ -0,0 +1,300 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1_Scale, + Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, + Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + SetDefaults_HorizontalPodAutoscaler(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage + return nil +} + +func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in 
*HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage + return nil +} + +func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) +} + +func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) +} + +func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) +} + +func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go new file mode 100644 index 00000000..6932ba63 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go @@ -0,0 +1,166 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1_CrossVersionObjectReference, + DeepCopy_v1_HorizontalPodAutoscaler, + DeepCopy_v1_HorizontalPodAutoscalerList, + DeepCopy_v1_HorizontalPodAutoscalerSpec, + DeepCopy_v1_HorizontalPodAutoscalerStatus, + DeepCopy_v1_Scale, + DeepCopy_v1_ScaleSpec, + DeepCopy_v1_ScaleStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func DeepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(in)) + for i := range in { + if err := DeepCopy_v1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { + if err := DeepCopy_v1_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { + return err + } + if in.MinReplicas != nil { + in, out := in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = *in + } else { + out.MinReplicas = nil + } + out.MaxReplicas = in.MaxReplicas + if in.TargetCPUUtilizationPercentage != nil { + in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = *in + } else { + out.TargetCPUUtilizationPercentage = nil + } + return nil +} + +func DeepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { + if in.ObservedGeneration != nil { + in, out := in.ObservedGeneration, 
&out.ObservedGeneration + *out = new(int64) + **out = *in + } else { + out.ObservedGeneration = nil + } + if in.LastScaleTime != nil { + in, out := in.LastScaleTime, &out.LastScaleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = *in + } else { + out.CurrentCPUUtilizationPercentage = nil + } + return nil +} + +func DeepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ScaleStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { + out.Replicas = in.Replicas + return nil +} + +func DeepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go new file mode 100644 index 00000000..3fb24c46 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go new file mode 100644 index 00000000..1c67cc3a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go new file mode 100644 index 00000000..e90dd5d6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go @@ -0,0 +1,1612 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + Scale + ScaleSpec + ScaleStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (m *CrossVersionObjectReference) String() string { return proto.CompactTextString(m) } +func (*CrossVersionObjectReference) ProtoMessage() {} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscaler) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} + +func (m *Scale) Reset() { *m = Scale{} } +func (m *Scale) String() string { return proto.CompactTextString(m) } +func (*Scale) ProtoMessage() {} + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } +func (*ScaleSpec) ProtoMessage() {} + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } +func (*ScaleStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, 
err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != 
nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) + i += copy(data[i:], m.Selector) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + n += 1 + 
sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = 
&k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto new file mode 100644 index 00000000..14f1e3db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto @@ -0,0 +1,130 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names + optional string name = 2; + + // API version of the referent + optional string apiVersion = 3; +} + +// configuration of a horizontal pod autoscaler. +message HorizontalPodAutoscaler { + // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. + optional HorizontalPodAutoscalerSpec spec = 2; + + // current information about the autoscaler. + optional HorizontalPodAutoscalerStatus status = 3; +} + +// list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // Standard list metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// specification of a horizontal pod autoscaler. +message HorizontalPodAutoscalerSpec { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // lower limit for the number of pods that can be set by the autoscaler, default 1. + optional int32 minReplicas = 2; + + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + optional int32 maxReplicas = 3; + + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + optional int32 targetCPUUtilizationPercentage = 4; +} + +// current status of a horizontal pod autoscaler +message HorizontalPodAutoscalerStatus { + // most recent generation observed by this autoscaler. + optional int64 observedGeneration = 1; + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; + + // current number of replicas of pods managed by this autoscaler. 
+ optional int32 currentReplicas = 3; + + // desired number of replicas of pods managed by this autoscaler. + optional int32 desiredReplicas = 4; + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + optional int32 currentCPUUtilizationPercentage = 5; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. + optional ScaleSpec spec = 2; + + // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource. +message ScaleSpec { + // desired number of instances for the scaled object. + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + optional string selector = 2; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go new file mode 100644 index 00000000..fed2cdf4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + &Scale{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go new file mode 100644 index 00000000..7acb5272 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// specification of a horizontal pod autoscaler. +type HorizontalPodAutoscalerSpec struct { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // lower limit for the number of pods that can be set by the autoscaler, default 1. + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` +} + +// current status of a horizontal pod autoscaler +type HorizontalPodAutoscalerStatus struct { + // most recent generation observed by this autoscaler. + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. 
+ LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // current number of replicas of pods managed by this autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desired number of replicas of pods managed by this autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` +} + +// +genclient=true + +// configuration of a horizontal pod autoscaler. +type HorizontalPodAutoscaler struct { + unversioned.TypeMeta `json:",inline"` + // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current information about the autoscaler. + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Scale represents a scaling request for a resource. +type Scale struct { + unversioned.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. 
+ // More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..904e36a6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -0,0 +1,117 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "configuration of a horizontal pod autoscaler.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", + "status": "current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "list of horizontal pod autoscaler objects.", + "metadata": "Standard list metadata.", + "items": "list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "specification of a horizontal pod autoscaler.", + "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", + "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "current status of a horizontal pod autoscaler", + "observedGeneration": "most recent generation observed by this autoscaler.", + "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource.", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. 
The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go new file mode 100644 index 00000000..7a3493f3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go @@ -0,0 +1,140 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the batch API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/batch/v1" + "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" + + // force determinstic ordering when loading these packages + _ "k8s.io/kubernetes/pkg/apis/extensions/install" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/batch" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion, v2alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", batch.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) 
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + case v2alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(batch.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + batch.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1.SchemeGroupVersion: + v1.AddToScheme(api.Scheme) + case v2alpha1.SchemeGroupVersion: + v2alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go new file mode 100644 index 00000000..2d163c6e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go @@ -0,0 +1,106 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobSpec_To_batch_JobSpec, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. 
+ panic(err) + } + + err = api.Scheme.AddFieldLabelConversionFunc("batch/v1", "Job", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector + if in.Selector != nil { + out.Selector = new(unversioned.LabelSelector) + if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go new file mode 100644 index 00000000..4bb13c49 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go @@ -0,0 +1,330 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + batch "k8s.io/kubernetes/pkg/apis/batch" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1_Job_To_batch_Job, + Convert_batch_Job_To_v1_Job, + Convert_v1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v1_JobCondition, + Convert_v1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v1_JobList, + Convert_v1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v1_JobStatus, + Convert_v1_LabelSelector_To_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_To_v1_LabelSelector, + Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, + Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + SetDefaults_Job(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v1_Job(in, out, s) +} + +func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) +} + +func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) +} + +func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v1_JobList_To_batch_JobList(in, out, s) +} + +func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return 
autoConvert_batch_JobList_To_v1_JobList(in, out, s) +} + +func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]batch.JobCondition, len(*in)) + for i := range *in { + if err := Convert_v1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) +} + +func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := Convert_batch_JobCondition_To_v1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) +} + +func autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]unversioned.LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_v1_LabelSelector_To_unversioned_LabelSelector(in 
*LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + return autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in, out, s) +} + +func autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in, out, s) +} + +func autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = unversioned.LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) +} + +func autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go new file mode 100644 index 00000000..c2a50b4e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go @@ -0,0 +1,211 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1_Job, + DeepCopy_v1_JobCondition, + DeepCopy_v1_JobList, + DeepCopy_v1_JobSpec, + DeepCopy_v1_JobStatus, + DeepCopy_v1_LabelSelector, + DeepCopy_v1_LabelSelectorRequirement, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_v1_Job(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in + } else { + out.Parallelism = nil + } + if in.Completions != nil { + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in + } else { + out.Completions = nil + } + if in.ActiveDeadlineSeconds != nil { + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.ActiveDeadlineSeconds = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := DeepCopy_v1_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + in, out := in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = *in + } else { + out.ManualSelector = nil + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_v1_JobCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + in, out := 
in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func DeepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { + if in.MatchLabels != nil { + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.MatchLabels = nil + } + if in.MatchExpressions != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_v1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func DeepCopy_v1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + if in.Values != nil { + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Values = nil + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go new file mode 100644 index 00000000..81aa90c1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_Job, + ) +} + +func SetDefaults_Job(obj *Job) { + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. + if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go new file mode 100644 index 00000000..1c67cc3a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go new file mode 100644 index 00000000..95646919 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go @@ -0,0 +1,1901 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus + LabelSelector + LabelSelectorRequirement +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (m *JobCondition) String() string { return proto.CompactTextString(m) } +func (*JobCondition) ProtoMessage() {} + +func (m *JobList) Reset() { *m = JobList{} } +func (m *JobList) String() string { return proto.CompactTextString(m) } +func (*JobList) ProtoMessage() {} + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (m *JobSpec) String() string { return proto.CompactTextString(m) } +func (*JobSpec) ProtoMessage() {} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobStatus") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelectorRequirement") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} 
+ +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto new file mode 100644 index 00000000..9c361ed7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto @@ -0,0 +1,176 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Job represents the configuration of a single job. +message Job { + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobStatus status = 3; +} + +// JobCondition describes current state of a job. +message JobCondition { + // Type of job condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition.
+ optional string reason = 5; + + // Human readable message indicating details about last transition. + optional string message = 6; +} + +// JobList is a collection of jobs. +message JobList { + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of Job. + repeated Job items = 2; +} + +// JobSpec describes how the job execution will look like. +message JobSpec { + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional int32 parallelism = 1; + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional int32 completions = 2; + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + optional int64 activeDeadlineSeconds = 3; + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + optional LabelSelector selector = 4; + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md + optional bool manualSelector = 5; + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; +} + +// JobStatus represents the current state of a Job. +message JobStatus { + // Conditions represent the latest available observations of an object's current state. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + repeated JobCondition conditions = 1; + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; + + // CompletionTime represents time when the job was completed. 
It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; + + // Active is the number of actively running pods. + optional int32 active = 4; + + // Succeeded is the number of pods which reached Phase Succeeded. + optional int32 succeeded = 5; + + // Failed is the number of pods which reached Phase Failed. + optional int32 failed = 6; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + repeated string values = 3; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go new file mode 100644 index 00000000..d8c087f1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. 
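+// In addition to Job and JobList, the shared v1 ListOptions and DeleteOptions
+// are registered for this group version, and watch event kinds are added via
+// versionedwatch.AddToGroupVersion.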
+func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go new file mode 100644 index 00000000..35ec0520 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go @@ -0,0 +1,186 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// +genclient=true + +// Job represents the configuration of a single job. +type Job struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// JobList is a collection of jobs. +type JobList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Job. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. 
+ // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // Active is the number of actively running pods. + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // Succeeded is the number of pods which reached Phase Succeeded. + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // Failed is the number of pods which reached Phase Failed. + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. 
+ JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. 
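+// For example, the requirement {Key: "tier", Operator: LabelSelectorOpIn, Values: []string{"frontend"}}
+// selects objects that carry the label tier=frontend.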
+type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go new file mode 100644 index 00000000..21c621ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go @@ -0,0 +1,114 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Job.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "Conditions represent the latest available observations of an object's current state. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "active": "Active is the number of actively running pods.", + "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", + "failed": "Failed is the number of pods which reached Phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go new file mode 100644 index 00000000..4714fda0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + "fmt" + "reflect" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_batch_JobSpec_To_v2alpha1_JobSpec, + Convert_v2alpha1_JobSpec_To_batch_JobSpec, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, kind := range []string{"Job", "JobTemplate", "ScheduledJob"} { + err = api.Scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + } + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*batch.JobSpec))(in) + } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*JobSpec))(in) + } + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector + if in.Selector != nil { + out.Selector = new(unversioned.LabelSelector) + if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go new file mode 100644 index 00000000..c411875e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go @@ -0,0 +1,573 @@ +// +build !ignore_autogenerated + 
+/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + batch "k8s.io/kubernetes/pkg/apis/batch" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_Job_To_batch_Job, + Convert_batch_Job_To_v2alpha1_Job, + Convert_v2alpha1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v2alpha1_JobCondition, + Convert_v2alpha1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v2alpha1_JobList, + Convert_v2alpha1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v2alpha1_JobSpec, + Convert_v2alpha1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v2alpha1_JobStatus, + Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, + Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, + Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, + Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, + Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector, + Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, + Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement, + Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob, + Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob, + Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList, + Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList, + Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec, + Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec, + Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus, + Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + SetDefaults_Job(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
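+	// ObjectMeta has no generated typed conversion here, so the scope's generic,
+	// reflection-based s.Convert is used instead (hence the inefficiency noted above).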
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v2alpha1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_JobStatus_To_v2alpha1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v2alpha1_Job(in, out, s) +} + +func autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in, out, s) +} + +func autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { + return err + } + if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in, out, s) +} + +func autoConvert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_JobList_To_batch_JobList(in *JobList, out 
*batch.JobList, s conversion.Scope) error { + return autoConvert_v2alpha1_JobList_To_batch_JobList(in, out, s) +} + +func autoConvert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v2alpha1_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v2alpha1_JobList(in, out, s) +} + +func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(LabelSelector) + if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(*in, *out, s); err != nil { + return err + } + } else { + out.Selector = nil + } + out.ManualSelector = in.ManualSelector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]batch.JobCondition, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in, out, s) +} + +func autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := Convert_batch_JobCondition_To_v2alpha1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + out.StartTime = in.StartTime + 
out.CompletionTime = in.CompletionTime + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) +} + +func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + return autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) +} + +func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) +} + +func autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]unversioned.LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { + return autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in, out, s) +} + +func autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + out.MatchLabels = in.MatchLabels + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if err := Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in, out, s) +} + +func autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = unversioned.LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) +} + +func autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = LabelSelectorOperator(in.Operator) + out.Values = in.Values + return nil +} + +func Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { + return autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { + SetDefaults_ScheduledJob(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); 
err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in, out, s) +} + +func autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { + return autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.ScheduledJob, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in, out, s) +} + +func autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScheduledJob, len(*in)) + for i := range *in { + if err := Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, 
out *batch.ScheduledJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = in.StartingDeadlineSeconds + out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = in.Suspend + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, out *batch.ScheduledJobSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in, out, s) +} + +func autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = in.StartingDeadlineSeconds + out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = in.Suspend + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + return nil +} + +func Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in, out, s) +} + +func autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api.ObjectReference, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Active = nil + } + out.LastScheduleTime = in.LastScheduleTime + return nil +} + +func Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in, out, s) +} + +func autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Active = nil + } + out.LastScheduleTime = in.LastScheduleTime + return nil +} + +func Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { + return autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go new file mode 100644 index 00000000..92cb71ea --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go @@ -0,0 +1,324 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v2alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v2alpha1_Job, + DeepCopy_v2alpha1_JobCondition, + DeepCopy_v2alpha1_JobList, + DeepCopy_v2alpha1_JobSpec, + DeepCopy_v2alpha1_JobStatus, + DeepCopy_v2alpha1_JobTemplate, + DeepCopy_v2alpha1_JobTemplateSpec, + DeepCopy_v2alpha1_LabelSelector, + DeepCopy_v2alpha1_LabelSelectorRequirement, + DeepCopy_v2alpha1_ScheduledJob, + DeepCopy_v2alpha1_ScheduledJobList, + DeepCopy_v2alpha1_ScheduledJobSpec, + DeepCopy_v2alpha1_ScheduledJobStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_v2alpha1_Job(in Job, out *Job, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { + out.Type = in.Type + out.Status = in.Status + if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func DeepCopy_v2alpha1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Job, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_Job(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v2alpha1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { + if in.Parallelism != nil { + in, out := in.Parallelism, &out.Parallelism + *out = new(int32) + **out = *in + } else { + out.Parallelism = nil + } + if in.Completions != nil { + in, out := in.Completions, &out.Completions + *out = new(int32) + **out = *in + } else { + out.Completions = nil + } + if in.ActiveDeadlineSeconds != nil { + in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.ActiveDeadlineSeconds = nil + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(LabelSelector) + if err := 
DeepCopy_v2alpha1_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + if in.ManualSelector != nil { + in, out := in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = *in + } else { + out.ManualSelector = nil + } + if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { + if in.Conditions != nil { + in, out := in.Conditions, &out.Conditions + *out = make([]JobCondition, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_JobCondition(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } + if in.StartTime != nil { + in, out := in.StartTime, &out.StartTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.StartTime = nil + } + if in.CompletionTime != nil { + in, out := in.CompletionTime, &out.CompletionTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.CompletionTime = nil + } + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func DeepCopy_v2alpha1_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(in.Template, &out.Template, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error { + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { + if in.MatchLabels != nil { + in, out := in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.MatchLabels = nil + } + if in.MatchExpressions != nil { + in, out := in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.MatchExpressions = nil + } + return nil +} + +func DeepCopy_v2alpha1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { + out.Key = in.Key + out.Operator = in.Operator + if in.Values != nil { + in, out := in.Values, &out.Values + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Values = nil + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := 
DeepCopy_v2alpha1_ScheduledJobStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ScheduledJob, len(in)) + for i := range in { + if err := DeepCopy_v2alpha1_ScheduledJob(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error { + out.Schedule = in.Schedule + if in.StartingDeadlineSeconds != nil { + in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = *in + } else { + out.StartingDeadlineSeconds = nil + } + out.ConcurrencyPolicy = in.ConcurrencyPolicy + if in.Suspend != nil { + in, out := in.Suspend, &out.Suspend + *out = new(bool) + **out = *in + } else { + out.Suspend = nil + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v2alpha1_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error { + if in.Active != nil { + in, out := in.Active, &out.Active + *out = make([]v1.ObjectReference, len(in)) + for i := range in { + if err := v1.DeepCopy_v1_ObjectReference(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Active = nil + } + if in.LastScheduleTime != nil { + in, out := in.LastScheduleTime, &out.LastScheduleTime + *out = new(unversioned.Time) + if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { + return err + } + } else { + out.LastScheduleTime = nil + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go new file mode 100644 index 00000000..72da797c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_Job, + SetDefaults_ScheduledJob, + ) +} + +func SetDefaults_Job(obj *Job) { + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } +} + +func SetDefaults_ScheduledJob(obj *ScheduledJob) { + if obj.Spec.ConcurrencyPolicy == "" { + obj.Spec.ConcurrencyPolicy = AllowConcurrent + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go new file mode 100644 index 00000000..0e6b67b5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go new file mode 100644 index 00000000..17192c01 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go @@ -0,0 +1,3018 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus + JobTemplate + JobTemplateSpec + LabelSelector + LabelSelectorRequirement + ScheduledJob + ScheduledJobList + ScheduledJobSpec + ScheduledJobStatus +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (m *JobCondition) String() string { return proto.CompactTextString(m) } +func (*JobCondition) ProtoMessage() {} + +func (m *JobList) Reset() { *m = JobList{} } +func (m *JobList) String() string { return proto.CompactTextString(m) } +func (*JobList) ProtoMessage() {} + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (m *JobSpec) String() string { return proto.CompactTextString(m) } +func (*JobSpec) ProtoMessage() {} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (m *JobTemplate) String() string { return proto.CompactTextString(m) } +func (*JobTemplate) ProtoMessage() {} + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (m *JobTemplateSpec) String() string { return proto.CompactTextString(m) } +func (*JobTemplateSpec) ProtoMessage() {} + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (m *LabelSelector) String() string { return proto.CompactTextString(m) } +func (*LabelSelector) ProtoMessage() {} + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } +func (*LabelSelectorRequirement) ProtoMessage() {} + +func (m *ScheduledJob) Reset() { *m = ScheduledJob{} } +func (m *ScheduledJob) String() string { return proto.CompactTextString(m) } +func (*ScheduledJob) ProtoMessage() {} + +func (m *ScheduledJobList) Reset() { *m = ScheduledJobList{} } +func (m *ScheduledJobList) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobList) ProtoMessage() {} + +func (m *ScheduledJobSpec) Reset() { *m = ScheduledJobSpec{} } +func (m *ScheduledJobSpec) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobSpec) ProtoMessage() {} + +func (m *ScheduledJobStatus) Reset() { *m = ScheduledJobStatus{} } +func (m *ScheduledJobStatus) String() string { return proto.CompactTextString(m) } +func (*ScheduledJobStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelectorRequirement") + proto.RegisterType((*ScheduledJob)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJob") + proto.RegisterType((*ScheduledJobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobList") + proto.RegisterType((*ScheduledJobSpec)(nil), 
"k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobSpec") + proto.RegisterType((*ScheduledJobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobStatus") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, 
uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} + +func (m *JobTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n12, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + return i, nil +} + +func (m *JobTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k := range 
m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ScheduledJob) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJob) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n16, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n17, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *ScheduledJobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ScheduledJobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) + i += copy(data[i:], m.Schedule) + if 
m.StartingDeadlineSeconds != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(data[i:], m.ConcurrencyPolicy) + if m.Suspend != nil { + data[i] = 0x20 + i++ + if *m.Suspend { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) + n19, err := m.JobTemplate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + return i, nil +} + +func (m *ScheduledJobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScheduledJobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Active) > 0 { + for _, msg := range m.Active { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LastScheduleTime != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) + n20, err := m.LastScheduleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func (m *JobTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScheduledJob) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScheduledJobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScheduledJobSpec) Size() (n int) { + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScheduledJobStatus) Size() (n int) { + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } 
+ if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJob) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ScheduledJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduledJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduledJobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ScheduledJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduledJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + 
next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto new file mode 100644 index 00000000..cfcbd01c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto @@ -0,0 +1,253 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// Job represents the configuration of a single job. +message Job { + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobStatus status = 3; +} + +// JobCondition describes current state of a job. +message JobCondition { + // Type of job condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; + + // Last time the condition transit from one status to another. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + optional string reason = 5; + + // Human readable message indicating details about last transition. + optional string message = 6; +} + +// JobList is a collection of jobs. +message JobList { + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of Job. + repeated Job items = 2; +} + +// JobSpec describes how the job execution will look like. +message JobSpec { + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. 
The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional int32 parallelism = 1; + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional int32 completions = 2; + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + optional int64 activeDeadlineSeconds = 3; + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + optional LabelSelector selector = 4; + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md + optional bool manualSelector = 5; + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; +} + +// JobStatus represents the current state of a Job. +message JobStatus { + // Conditions represent the latest available observations of an object's current state. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + repeated JobCondition conditions = 1; + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; + + // Active is the number of actively running pods. + optional int32 active = 4; + + // Succeeded is the number of pods which reached Phase Succeeded. + optional int32 succeeded = 5; + + // Failed is the number of pods which reached Phase Failed. + optional int32 failed = 6; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional JobSpec spec = 2; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + repeated string values = 3; +} + +// ScheduledJob represents the configuration of a single scheduled job. +message ScheduledJob { + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional ScheduledJobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + optional ScheduledJobStatus status = 3; +} + +// ScheduledJobList is a collection of scheduled jobs. +message ScheduledJobList { + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of ScheduledJob. + repeated ScheduledJob items = 2; +} + +// ScheduledJobSpec describes how the job execution will look like and when it will actually run. 
+message ScheduledJobSpec { + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + optional int64 startingDeadlineSeconds = 2; + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + optional string concurrencyPolicy = 3; + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + optional bool suspend = 4; + + // JobTemplate is the object that describes the job that will be created when + // executing a ScheduledJob. + optional JobTemplateSpec jobTemplate = 5; +} + +// ScheduledJobStatus represents the current state of a Job. +message ScheduledJobStatus { + // Active holds pointers to currently running jobs. + repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go new file mode 100644 index 00000000..00142f01 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + &JobTemplate{}, + &ScheduledJob{}, + &ScheduledJobList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go new file mode 100644 index 00000000..568f5171 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go @@ -0,0 +1,283 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" +) + +// Job represents the configuration of a single job. +type Job struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// JobList is a collection of jobs. +type JobList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Job. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. 
+ // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors + Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // Active is the number of actively running pods. 
+ Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // Succeeded is the number of pods which reached Phase Succeeded. + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // Failed is the number of pods which reached Phase Failed. + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// ScheduledJob represents the configuration of a single scheduled job. +type ScheduledJob struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Spec ScheduledJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status + Status ScheduledJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ScheduledJobList is a collection of scheduled jobs. +type ScheduledJobList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ScheduledJob. + Items []ScheduledJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ScheduledJobSpec describes how the job execution will look like and when it will actually run. +type ScheduledJobSpec struct { + + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. 
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + Suspend *bool `json:"suspend" protobuf:"varint,4,opt,name=suspend"` + + // JobTemplate is the object that describes the job that will be created when + // executing a ScheduledJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows ScheduledJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// ScheduledJobStatus represents the current state of a Job. +type ScheduledJobStatus struct { + // Active holds pointers to currently running jobs. + Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. 
This array is replaced during a strategic + // merge patch. + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..95b86d0d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -0,0 +1,178 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Job.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "Conditions represent the latest available observations of an object's current state. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", + "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "active": "Active is the number of actively running pods.", + "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", + "failed": "Failed is the number of pods which reached Phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "template": "Template defines jobs that will be created from this template http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_ScheduledJob = map[string]string{ + "": "ScheduledJob represents the configuration of a single scheduled job.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", +} + +func (ScheduledJob) SwaggerDoc() map[string]string { + return map_ScheduledJob +} + +var map_ScheduledJobList = map[string]string{ + "": "ScheduledJobList is a collection of scheduled jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of ScheduledJob.", +} + +func (ScheduledJobList) SwaggerDoc() map[string]string { + return map_ScheduledJobList +} + +var map_ScheduledJobSpec = map[string]string{ + "": "ScheduledJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a ScheduledJob.", +} + +func (ScheduledJobSpec) SwaggerDoc() map[string]string { + return map_ScheduledJobSpec +} + +var map_ScheduledJobStatus = map[string]string{ + "": "ScheduledJobStatus represents the current state of a Job.", + "active": "Active holds pointers to currently running jobs.", + "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", +} + +func (ScheduledJobStatus) SwaggerDoc() map[string]string { + return map_ScheduledJobStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go new file mode 100644 index 00000000..e4f82bcb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go @@ -0,0 +1,374 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package componentconfig + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_componentconfig_IPVar, + DeepCopy_componentconfig_KubeControllerManagerConfiguration, + DeepCopy_componentconfig_KubeProxyConfiguration, + DeepCopy_componentconfig_KubeSchedulerConfiguration, + DeepCopy_componentconfig_KubeletConfiguration, + DeepCopy_componentconfig_LeaderElectionConfiguration, + DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration, + DeepCopy_componentconfig_PortRangeVar, + DeepCopy_componentconfig_VolumeConfiguration, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_componentconfig_IPVar(in IPVar, out *IPVar, c *conversion.Cloner) error { + if in.Val != nil { + in, out := in.Val, &out.Val + *out = new(string) + **out = *in + } else { + out.Val = nil + } + return nil +} + +func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in KubeControllerManagerConfiguration, out *KubeControllerManagerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Port = in.Port + out.Address = in.Address + out.CloudProvider = in.CloudProvider + out.CloudConfigFile = in.CloudConfigFile + out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs + out.ConcurrentRSSyncs = in.ConcurrentRSSyncs + out.ConcurrentRCSyncs = in.ConcurrentRCSyncs + out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs + out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs + out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs + out.ConcurrentJobSyncs = in.ConcurrentJobSyncs + out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs + out.ConcurrentSATokenSyncs = in.ConcurrentSATokenSyncs + out.LookupCacheSizeForRC = in.LookupCacheSizeForRC + out.LookupCacheSizeForRS = in.LookupCacheSizeForRS + out.LookupCacheSizeForDaemonSet = in.LookupCacheSizeForDaemonSet + if err := unversioned.DeepCopy_unversioned_Duration(in.ServiceSyncPeriod, &out.ServiceSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeSyncPeriod, &out.NodeSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ResourceQuotaSyncPeriod, &out.ResourceQuotaSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NamespaceSyncPeriod, &out.NamespaceSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.PVClaimBinderSyncPeriod, &out.PVClaimBinderSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.MinResyncPeriod, &out.MinResyncPeriod, c); err != nil { + return err + } + out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold + if err := unversioned.DeepCopy_unversioned_Duration(in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.DeploymentControllerSyncPeriod, &out.DeploymentControllerSyncPeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.PodEvictionTimeout, &out.PodEvictionTimeout, c); err != nil { + return err + } + out.DeletingPodsQps = in.DeletingPodsQps + 
out.DeletingPodsBurst = in.DeletingPodsBurst + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorGracePeriod, &out.NodeMonitorGracePeriod, c); err != nil { + return err + } + out.RegisterRetryCount = in.RegisterRetryCount + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStartupGracePeriod, &out.NodeStartupGracePeriod, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorPeriod, &out.NodeMonitorPeriod, c); err != nil { + return err + } + out.ServiceAccountKeyFile = in.ServiceAccountKeyFile + out.EnableProfiling = in.EnableProfiling + out.ClusterName = in.ClusterName + out.ClusterCIDR = in.ClusterCIDR + out.ServiceCIDR = in.ServiceCIDR + out.NodeCIDRMaskSize = in.NodeCIDRMaskSize + out.AllocateNodeCIDRs = in.AllocateNodeCIDRs + out.ConfigureCloudRoutes = in.ConfigureCloudRoutes + out.RootCAFile = in.RootCAFile + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + return err + } + if err := DeepCopy_componentconfig_VolumeConfiguration(in.VolumeConfiguration, &out.VolumeConfiguration, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ControllerStartInterval, &out.ControllerStartInterval, c); err != nil { + return err + } + out.EnableGarbageCollector = in.EnableGarbageCollector + return nil +} + +func DeepCopy_componentconfig_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR + out.HealthzBindAddress = in.HealthzBindAddress + out.HealthzPort = in.HealthzPort + out.HostnameOverride = in.HostnameOverride + if in.IPTablesMasqueradeBit != nil { + in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit + *out = new(int32) + **out = *in + } else { + out.IPTablesMasqueradeBit = nil + } + if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { + return err + } + out.KubeconfigPath = in.KubeconfigPath + out.MasqueradeAll = in.MasqueradeAll + out.Master = in.Master + if in.OOMScoreAdj != nil { + in, out := in.OOMScoreAdj, &out.OOMScoreAdj + *out = new(int32) + **out = *in + } else { + out.OOMScoreAdj = nil + } + out.Mode = in.Mode + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { + return err + } + out.ConntrackMax = in.ConntrackMax + if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Port = in.Port + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + out.EnableProfiling = in.EnableProfiling + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + out.SchedulerName = 
in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out *KubeletConfiguration, c *conversion.Cloner) error { + out.Config = in.Config + if err := unversioned.DeepCopy_unversioned_Duration(in.SyncFrequency, &out.SyncFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.FileCheckFrequency, &out.FileCheckFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.HTTPCheckFrequency, &out.HTTPCheckFrequency, c); err != nil { + return err + } + out.ManifestURL = in.ManifestURL + out.ManifestURLHeader = in.ManifestURLHeader + out.EnableServer = in.EnableServer + out.Address = in.Address + out.Port = in.Port + out.ReadOnlyPort = in.ReadOnlyPort + out.TLSCertFile = in.TLSCertFile + out.TLSPrivateKeyFile = in.TLSPrivateKeyFile + out.CertDirectory = in.CertDirectory + out.HostnameOverride = in.HostnameOverride + out.PodInfraContainerImage = in.PodInfraContainerImage + out.DockerEndpoint = in.DockerEndpoint + out.RootDirectory = in.RootDirectory + out.SeccompProfileRoot = in.SeccompProfileRoot + out.AllowPrivileged = in.AllowPrivileged + out.HostNetworkSources = in.HostNetworkSources + out.HostPIDSources = in.HostPIDSources + out.HostIPCSources = in.HostIPCSources + out.RegistryPullQPS = in.RegistryPullQPS + out.RegistryBurst = in.RegistryBurst + out.EventRecordQPS = in.EventRecordQPS + out.EventBurst = in.EventBurst + out.EnableDebuggingHandlers = in.EnableDebuggingHandlers + if err := unversioned.DeepCopy_unversioned_Duration(in.MinimumGCAge, &out.MinimumGCAge, c); err != nil { + return err + } + out.MaxPerPodContainerCount = in.MaxPerPodContainerCount + out.MaxContainerCount = in.MaxContainerCount + out.CAdvisorPort = in.CAdvisorPort + out.HealthzPort = in.HealthzPort + out.HealthzBindAddress = in.HealthzBindAddress + out.OOMScoreAdj = in.OOMScoreAdj + out.RegisterNode = in.RegisterNode + out.ClusterDomain = in.ClusterDomain + out.MasterServiceNamespace = in.MasterServiceNamespace + out.ClusterDNS = in.ClusterDNS + if err := unversioned.DeepCopy_unversioned_Duration(in.StreamingConnectionIdleTimeout, &out.StreamingConnectionIdleTimeout, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStatusUpdateFrequency, &out.NodeStatusUpdateFrequency, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.ImageMinimumGCAge, &out.ImageMinimumGCAge, c); err != nil { + return err + } + out.ImageGCHighThresholdPercent = in.ImageGCHighThresholdPercent + out.ImageGCLowThresholdPercent = in.ImageGCLowThresholdPercent + out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB + if err := unversioned.DeepCopy_unversioned_Duration(in.VolumeStatsAggPeriod, &out.VolumeStatsAggPeriod, c); err != nil { + return err + } + out.NetworkPluginName = in.NetworkPluginName + out.NetworkPluginDir = in.NetworkPluginDir + out.VolumePluginDir = in.VolumePluginDir + out.CloudProvider = in.CloudProvider + out.CloudConfigFile = in.CloudConfigFile + out.KubeletCgroups = in.KubeletCgroups + out.RuntimeCgroups = in.RuntimeCgroups + out.SystemCgroups = in.SystemCgroups + out.CgroupRoot = in.CgroupRoot + out.ContainerRuntime = in.ContainerRuntime + if err := 
unversioned.DeepCopy_unversioned_Duration(in.RuntimeRequestTimeout, &out.RuntimeRequestTimeout, c); err != nil { + return err + } + out.RktPath = in.RktPath + out.RktAPIEndpoint = in.RktAPIEndpoint + out.RktStage1Image = in.RktStage1Image + out.LockFilePath = in.LockFilePath + out.ExitOnLockContention = in.ExitOnLockContention + out.ConfigureCBR0 = in.ConfigureCBR0 + out.HairpinMode = in.HairpinMode + out.BabysitDaemons = in.BabysitDaemons + out.MaxPods = in.MaxPods + out.NvidiaGPUs = in.NvidiaGPUs + out.DockerExecHandlerName = in.DockerExecHandlerName + out.PodCIDR = in.PodCIDR + out.ResolverConfig = in.ResolverConfig + out.CPUCFSQuota = in.CPUCFSQuota + out.Containerized = in.Containerized + out.MaxOpenFiles = in.MaxOpenFiles + out.ReconcileCIDR = in.ReconcileCIDR + out.RegisterSchedulable = in.RegisterSchedulable + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + out.SerializeImagePulls = in.SerializeImagePulls + out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay + if err := unversioned.DeepCopy_unversioned_Duration(in.OutOfDiskTransitionFrequency, &out.OutOfDiskTransitionFrequency, c); err != nil { + return err + } + out.NodeIP = in.NodeIP + if in.NodeLabels != nil { + in, out := in.NodeLabels, &out.NodeLabels + *out = make(map[string]string) + for key, val := range in { + (*out)[key] = val + } + } else { + out.NodeLabels = nil + } + out.NonMasqueradeCIDR = in.NonMasqueradeCIDR + out.EnableCustomMetrics = in.EnableCustomMetrics + out.EvictionHard = in.EvictionHard + out.EvictionSoft = in.EvictionSoft + out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod + if err := unversioned.DeepCopy_unversioned_Duration(in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod, c); err != nil { + return err + } + out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod + out.PodsPerCore = in.PodsPerCore + out.EnableControllerAttachDetach = in.EnableControllerAttachDetach + return nil +} + +func DeepCopy_componentconfig_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { + out.LeaderElect = in.LeaderElect + if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { + return err + } + return nil +} + +func DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in PersistentVolumeRecyclerConfiguration, out *PersistentVolumeRecyclerConfiguration, c *conversion.Cloner) error { + out.MaximumRetry = in.MaximumRetry + out.MinimumTimeoutNFS = in.MinimumTimeoutNFS + out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS + out.IncrementTimeoutNFS = in.IncrementTimeoutNFS + out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath + out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath + out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath + return nil +} + +func DeepCopy_componentconfig_PortRangeVar(in PortRangeVar, out *PortRangeVar, c *conversion.Cloner) error { + if in.Val != nil { + in, out := in.Val, &out.Val + *out = new(string) + **out = *in + } else { + out.Val = nil + } + return nil +} + +func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *VolumeConfiguration, c *conversion.Cloner) error { + 
out.EnableHostPathProvisioning = in.EnableHostPathProvisioning + out.EnableDynamicProvisioning = in.EnableDynamicProvisioning + if err := DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, c); err != nil { + return err + } + out.FlexVolumePluginDir = in.FlexVolumePluginDir + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go new file mode 100644 index 00000000..edd9c797 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentconfig + +import ( + "fmt" + "net" + + utilnet "k8s.io/kubernetes/pkg/util/net" +) + +// used for validating command line opts +// TODO(mikedanese): remove these when we remove command line flags + +type IPVar struct { + Val *string +} + +func (v IPVar) Set(s string) error { + if net.ParseIP(s) == nil { + return fmt.Errorf("%q is not a valid IP address", s) + } + if v.Val == nil { + // it's okay to panic here since this is programmer error + panic("the string pointer passed into IPVar should not be nil") + } + *v.Val = s + return nil +} + +func (v IPVar) String() string { + if v.Val == nil { + return "" + } + return *v.Val +} + +func (v IPVar) Type() string { + return "ip" +} + +func (m *ProxyMode) Set(s string) error { + *m = ProxyMode(s) + return nil +} + +func (m *ProxyMode) String() string { + if m != nil { + return string(*m) + } + return "" +} + +func (m *ProxyMode) Type() string { + return "ProxyMode" +} + +type PortRangeVar struct { + Val *string +} + +func (v PortRangeVar) Set(s string) error { + if _, err := utilnet.ParsePortRange(s); err != nil { + return fmt.Errorf("%q is not a valid port range: %v", s, err) + } + if v.Val == nil { + // it's okay to panic here since this is programmer error + panic("the string pointer passed into PortRangeVar should not be nil") + } + *v.Val = s + return nil +} + +func (v PortRangeVar) String() string { + if v.Val == nil { + return "" + } + return *v.Val +} + +func (v PortRangeVar) Type() string { + return "port-range" +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go new file mode 100644 index 00000000..ec555427 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/componentconfig" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", componentconfig.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(componentconfig.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + componentconfig.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go new file mode 100644 index 00000000..0666c543 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentconfig + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +// GroupName is the group name use in this package +const GroupName = "componentconfig" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeProxyConfiguration{}, + &KubeSchedulerConfiguration{}, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go new file mode 100644 index 00000000..0c7dfc9a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go @@ -0,0 +1,621 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentconfig + +import "k8s.io/kubernetes/pkg/api/unversioned" + +type KubeProxyConfiguration struct { + unversioned.TypeMeta + + // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 + // for all interfaces) + BindAddress string `json:"bindAddress"` + // clusterCIDR is the CIDR range of the pods in the cluster. It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string `json:"clusterCIDR"` + // healthzBindAddress is the IP address for the health check server to serve on, + // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) + HealthzBindAddress string `json:"healthzBindAddress"` + // healthzPort is the port to bind the health check server. Use 0 to disable. + HealthzPort int32 `json:"healthzPort"` + // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. + HostnameOverride string `json:"hostnameOverride"` + // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using + // the pure iptables proxy mode. Values must be within the range [0, 31]. + IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` + // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` + // kubeconfigPath is the path to the kubeconfig file with authorization information (the + // master location is set by the master flag). + KubeconfigPath string `json:"kubeconfigPath"` + // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + MasqueradeAll bool `json:"masqueradeAll"` + // master is the address of the Kubernetes API server (overrides any value in kubeconfig) + Master string `json:"master"` + // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within + // the range [-1000, 1000] + OOMScoreAdj *int32 `json:"oomScoreAdj"` + // mode specifies which proxy mode to use. + Mode ProxyMode `json:"mode"` + // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed + // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. + PortRange string `json:"portRange"` + // resourceContainer is the absolute name of the resource-only container to create and run + // the Kube-proxy in (Default: /kube-proxy). + ResourceContainer string `json:"kubeletCgroups"` + // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). + // Must be greater than 0. Only applicable for proxyMode=userspace. + UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` + // conntrackMax is the maximum number of NAT connections to track (0 to leave as-is)") + ConntrackMax int32 `json:"conntrackMax"` + // conntrackTCPEstablishedTimeout is how long an idle UDP connection will be kept open + // (e.g. '250ms', '2s'). Must be greater than 0. 
Only applicable when proxyMode is set to 'userspace'. + ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` +} + +// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' +// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the +// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the +// best-available proxy (currently iptables, but may change in future versions). If the +// iptables proxy is selected, regardless of how, but the system's kernel or iptables +// versions are insufficient, this always falls back to the userspace proxy. +type ProxyMode string + +const ( + ProxyModeUserspace ProxyMode = "userspace" + ProxyModeIPTables ProxyMode = "iptables" +) + +// HairpinMode denotes how the kubelet should configure networking to handle +// hairpin packets. +type HairpinMode string + +// Enum settings for different ways to handle hairpin packets. +const ( + // Set the hairpin flag on the veth of containers in the respective + // container runtime. + HairpinVeth = "hairpin-veth" + // Make the container bridge promiscuous. This will force it to accept + // hairpin packets, even if the flag isn't set on ports of the bridge. + PromiscuousBridge = "promiscuous-bridge" + // Neither of the above. If the kubelet is started in this hairpin mode + // and kube-proxy is running in iptables mode, hairpin packets will be + // dropped by the container bridge. + HairpinNone = "none" +) + +// TODO: curate the ordering and structure of this config object +type KubeletConfiguration struct { + // config is the path to the config file or directory of files + Config string `json:"config"` + // syncFrequency is the max period between synchronizing running + // containers and config + SyncFrequency unversioned.Duration `json:"syncFrequency"` + // fileCheckFrequency is the duration between checking config files for + // new data + FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` + // httpCheckFrequency is the duration between checking http for new data + HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` + // manifestURL is the URL for accessing the container manifest + ManifestURL string `json:"manifestURL"` + // manifestURLHeader is the HTTP header to use when accessing the manifest + // URL, with the key separated from the value with a ':', as in 'key:value' + ManifestURLHeader string `json:"manifestURLHeader"` + // enableServer enables the Kubelet's server + EnableServer bool `json:"enableServer"` + // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 + // for all interfaces) + Address string `json:"address"` + // port is the port for the Kubelet to serve on. + Port uint `json:"port"` + // readOnlyPort is the read-only port for the Kubelet to serve on with + // no authentication/authorization (set to 0 to disable) + ReadOnlyPort uint `json:"readOnlyPort"` + // tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert, + // if any, concatenated after server cert). If tlsCertFile and + // tlsPrivateKeyFile are not provided, a self-signed certificate + // and key are generated for the public address and saved to the directory + // passed to certDir. + TLSCertFile string `json:"tLSCertFile"` + // tLSPrivateKeyFile is the file containing the x509 private key matching + // tlsCertFile. 
+ TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"` + // certDirectory is the directory where the TLS certs are located (by + // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile + // are provided, this flag will be ignored. + CertDirectory string `json:"certDirectory"` + // hostnameOverride is the hostname used to identify the kubelet instead + // of the actual hostname. + HostnameOverride string `json:"hostnameOverride"` + // podInfraContainerImage is the image whose network/ipc namespaces + // containers in each pod will use. + PodInfraContainerImage string `json:"podInfraContainerImage"` + // dockerEndpoint is the path to the docker endpoint to communicate with. + DockerEndpoint string `json:"dockerEndpoint"` + // rootDirectory is the directory path to place kubelet files (volume + // mounts,etc). + RootDirectory string `json:"rootDirectory"` + // seccompProfileRoot is the directory path for seccomp profiles. + SeccompProfileRoot string `json:"seccompProfileRoot"` + // allowPrivileged enables containers to request privileged mode. + // Defaults to false. + AllowPrivileged bool `json:"allowPrivileged"` + // hostNetworkSources is a comma-separated list of sources from which the + // Kubelet allows pods to use of host network. Defaults to "*". + HostNetworkSources string `json:"hostNetworkSources"` + // hostPIDSources is a comma-separated list of sources from which the + // Kubelet allows pods to use the host pid namespace. Defaults to "*". + HostPIDSources string `json:"hostPIDSources"` + // hostIPCSources is a comma-separated list of sources from which the + // Kubelet allows pods to use the host ipc namespace. Defaults to "*". + HostIPCSources string `json:"hostIPCSources"` + // registryPullQPS is the limit of registry pulls per second. If 0, + // unlimited. Set to 0 for no limit. Defaults to 5.0. + RegistryPullQPS float64 `json:"registryPullQPS"` + // registryBurst is the maximum size of a bursty pulls, temporarily allows + // pulls to burst to this number, while still not exceeding registryQps. + // Only used if registryQps > 0. + RegistryBurst int32 `json:"registryBurst"` + // eventRecordQPS is the maximum event creations per second. If 0, there + // is no limit enforced. + EventRecordQPS float32 `json:"eventRecordQPS"` + // eventBurst is the maximum size of a bursty event records, temporarily + // allows event records to burst to this number, while still not exceeding + // event-qps. Only used if eventQps > 0 + EventBurst int32 `json:"eventBurst"` + // enableDebuggingHandlers enables server endpoints for log collection + // and local running of containers and commands + EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` + // minimumGCAge is the minimum age for a finished container before it is + // garbage collected. + MinimumGCAge unversioned.Duration `json:"minimumGCAge"` + // maxPerPodContainerCount is the maximum number of old instances to + // retain per container. Each container takes up some disk space. + MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` + // maxContainerCount is the maximum number of old instances of containers + // to retain globally. Each container takes up some disk space. 
+ MaxContainerCount int32 `json:"maxContainerCount"` + // cAdvisorPort is the port of the localhost cAdvisor endpoint + CAdvisorPort uint `json:"cAdvisorPort"` + // healthzPort is the port of the localhost healthz endpoint + HealthzPort int32 `json:"healthzPort"` + // healthzBindAddress is the IP address for the healthz server to serve + // on. + HealthzBindAddress string `json:"healthzBindAddress"` + // oomScoreAdj is The oom-score-adj value for kubelet process. Values + // must be within the range [-1000, 1000]. + OOMScoreAdj int32 `json:"oomScoreAdj"` + // registerNode enables automatic registration with the apiserver. + RegisterNode bool `json:"registerNode"` + // clusterDomain is the DNS domain for this cluster. If set, kubelet will + // configure all containers to search this domain in addition to the + // host's search domains. + ClusterDomain string `json:"clusterDomain"` + // masterServiceNamespace is The namespace from which the kubernetes + // master services should be injected into pods. + MasterServiceNamespace string `json:"masterServiceNamespace"` + // clusterDNS is the IP address for a cluster DNS server. If set, kubelet + // will configure all containers to use this for DNS resolution in + // addition to the host's DNS servers + ClusterDNS string `json:"clusterDNS"` + // streamingConnectionIdleTimeout is the maximum time a streaming connection + // can be idle before the connection is automatically closed. + StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` + // nodeStatusUpdateFrequency is the frequency that kubelet posts node + // status to master. Note: be cautious when changing the constant, it + // must work with nodeMonitorGracePeriod in nodecontroller. + NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` + // minimumGCAge is the minimum age for a unused image before it is + // garbage collected. + ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` + // imageGCHighThresholdPercent is the percent of disk usage after which + // image garbage collection is always run. + ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` + // imageGCLowThresholdPercent is the percent of disk usage before which + // image garbage collection is never run. Lowest disk usage to garbage + // collect to. + ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` + // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to + // maintain. When disk space falls below this threshold, new pods would + // be rejected. + LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` + // How frequently to calculate and cache volume disk usage for all pods + VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` + // networkPluginName is the name of the network plugin to be invoked for + // various events in kubelet/pod lifecycle + NetworkPluginName string `json:"networkPluginName"` + // networkPluginDir is the full path of the directory in which to search + // for network plugins + NetworkPluginDir string `json:"networkPluginDir"` + // volumePluginDir is the full path of the directory in which to search + // for additional third party volume plugins + VolumePluginDir string `json:"volumePluginDir"` + // cloudProvider is the provider for cloud services. + CloudProvider string `json:"cloudProvider,omitempty"` + // cloudConfigFile is the path to the cloud provider configuration file. 
+ CloudConfigFile string `json:"cloudConfigFile,omitempty"` + // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. + KubeletCgroups string `json:"kubeletCgroups,omitempty"` + // Cgroups that container runtime is expected to be isolated in. + RuntimeCgroups string `json:"runtimeCgroups,omitempty"` + // SystemCgroups is the absolute name of cgroups in which to place + // all non-kernel processes that are not already in a container. Empty + // for no container. Rolling back the flag requires a reboot. + SystemCgroups string `json:"systemContainer,omitempty"` + // cgroupRoot is the root cgroup to use for pods. This is handled by the + // container runtime on a best effort basis. + CgroupRoot string `json:"cgroupRoot,omitempty"` + // containerRuntime is the container runtime to use. + ContainerRuntime string `json:"containerRuntime"` + // runtimeRequestTimeout is the timeout for all runtime requests except long running + // requests - pull, logs, exec and attach. + RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout,omitempty"` + // rktPath is the path of the rkt binary. Leave empty to use the first rkt in + // $PATH. + RktPath string `json:"rktPath,omitempty"` + // rktApiEndpoint is the endpoint of the rkt API service to communicate with. + RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` + // rktStage1Image is the image to use as stage1. Local paths and + // http/https URLs are supported. + RktStage1Image string `json:"rktStage1Image,omitempty"` + // lockFilePath is the path that the kubelet will use as a lock file. + // It uses this file as a lock to synchronize with other kubelet processes + // that may be running. + LockFilePath string `json:"lockFilePath"` + // ExitOnLockContention is a flag that signifies to the kubelet that it is running + // in "bootstrap" mode. This requires that 'LockFilePath' has been set. + // This will cause the kubelet to listen to inotify events on the lock file, + // releasing it and exiting when another process tries to open that file. + ExitOnLockContention bool `json:"exitOnLockContention"` + // configureCBR0 enables the kubelet to configure cbr0 based on + // Node.Spec.PodCIDR. + ConfigureCBR0 bool `json:"configureCbr0"` + // How should the kubelet configure the container bridge for hairpin packets. + // Setting this flag allows endpoints in a Service to loadbalance back to + // themselves if they should try to access their own Service. Values: + // "promiscuous-bridge": make the container bridge promiscuous. + // "hairpin-veth": set the hairpin flag on container veth interfaces. + // "none": do nothing. + // Setting --configure-cbr0 to false implies that to achieve hairpin NAT + // one must set --hairpin-mode=veth-flag, because bridge assumes the + // existence of a container bridge named cbr0. + HairpinMode string `json:"hairpinMode"` + // The node has a babysitter process monitoring docker and the kubelet. + BabysitDaemons bool `json:"babysitDaemons"` + // maxPods is the number of pods that can run on this Kubelet. + MaxPods int32 `json:"maxPods"` + // nvidiaGPUs is the number of NVIDIA GPU devices on this node. + NvidiaGPUs int32 `json:"nvidiaGPUs"` + // dockerExecHandlerName is the handler to use when executing a command + // in a container. Valid values are 'native' and 'nsenter'. Defaults to + // 'native'. + DockerExecHandlerName string `json:"dockerExecHandlerName"` + // The CIDR to use for pod IP addresses, only used in standalone mode. + // In cluster mode, this is obtained from the master. 
+ PodCIDR string `json:"podCIDR"` + // ResolverConfig is the resolver configuration file used as the basis + // for the container DNS resolution configuration. + ResolverConfig string `json:"resolvConf"` + // cpuCFSQuota enables CPU CFS quota enforcement for containers that + // specify CPU limits. + CPUCFSQuota bool `json:"cpuCFSQuota"` + // containerized should be set to true if the kubelet is running in a container. + Containerized bool `json:"containerized"` + // maxOpenFiles is the number of files that can be opened by the Kubelet process. + MaxOpenFiles uint64 `json:"maxOpenFiles"` + // reconcileCIDR reconciles the node CIDR with the CIDR specified by the + // API server. No-op if register-node or configure-cbr0 is false. + ReconcileCIDR bool `json:"reconcileCIDR"` + // registerSchedulable tells the kubelet to register the node as + // schedulable. No-op if register-node is false. + RegisterSchedulable bool `json:"registerSchedulable"` + // contentType is the contentType of requests sent to the apiserver. + ContentType string `json:"contentType"` + // kubeAPIQPS is the QPS to use while talking with the kubernetes apiserver. + KubeAPIQPS float32 `json:"kubeAPIQPS"` + // kubeAPIBurst is the burst to allow while talking with the kubernetes + // apiserver. + KubeAPIBurst int32 `json:"kubeAPIBurst"` + // serializeImagePulls, when enabled, tells the Kubelet to pull images one + // at a time. We recommend *not* changing the default value on nodes that + // run docker daemon with version < 1.9 or an Aufs storage backend. + // Issue #10959 has more details. + SerializeImagePulls bool `json:"serializeImagePulls"` + // experimentalFlannelOverlay enables experimental support for starting the + // kubelet with the default overlay network (flannel). Assumes flanneld + // is already running in client mode. + ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"` + // outOfDiskTransitionFrequency is the duration for which the kubelet has to + // wait before transitioning out of the out-of-disk node condition status. + OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"` + // nodeIP is the IP address of the node. If set, the kubelet will use this IP + // address for the node. + NodeIP string `json:"nodeIP,omitempty"` + // nodeLabels to add when registering the node in the cluster. + NodeLabels map[string]string `json:"nodeLabels"` + // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. + NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` + // enableCustomMetrics enables gathering custom metrics. + EnableCustomMetrics bool `json:"enableCustomMetrics"` + // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. + EvictionHard string `json:"evictionHard,omitempty"` + // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. + EvictionSoft string `json:"evictionSoft,omitempty"` + // Comma-delimited list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. + EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"` + // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"` + // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. 
+ EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"` + // Maximum number of pods per core. Cannot exceed MaxPods + PodsPerCore int32 `json:"podsPerCore"` + // enableControllerAttachDetach enables the Attach/Detach controller to + // manage attachment/detachment of volumes scheduled to this node, and + // disables kubelet from executing any attach/detach operations + EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"` +} + +type KubeSchedulerConfiguration struct { + unversioned.TypeMeta + + // port is the port that the scheduler's http service runs on. + Port int32 `json:"port"` + // address is the IP address to serve on. + Address string `json:"address"` + // algorithmProvider is the scheduling algorithm provider to use. + AlgorithmProvider string `json:"algorithmProvider"` + // policyConfigFile is the filepath to the scheduler policy configuration. + PolicyConfigFile string `json:"policyConfigFile"` + // enableProfiling enables profiling via web interface. + EnableProfiling bool `json:"enableProfiling"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. + KubeAPIQPS float32 `json:"kubeAPIQPS"` + // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. + KubeAPIBurst int32 `json:"kubeAPIBurst"` + // schedulerName is name of the scheduler, used to select which pods + // will be processed by this scheduler, based on pod's annotation with + // key 'scheduler.alpha.kubernetes.io/name'. + SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` + // leaderElection defines the configuration of leader election client. + LeaderElection LeaderElectionConfiguration `json:"leaderElection"` +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. + LeaderElect bool `json:"leaderElect"` + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration unversioned.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. 
+ RenewDeadline unversioned.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod unversioned.Duration `json:"retryPeriod"` +} + +type KubeControllerManagerConfiguration struct { + unversioned.TypeMeta + + // port is the port that the controller-manager's http service runs on. + Port int32 `json:"port"` + // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). + Address string `json:"address"` + // cloudProvider is the provider for cloud services. + CloudProvider string `json:"cloudProvider"` + // cloudConfigFile is the path to the cloud provider configuration file. + CloudConfigFile string `json:"cloudConfigFile"` + // concurrentEndpointSyncs is the number of endpoint syncing operations + // that will be done concurrently. Larger number = faster endpoint updating, + // but more CPU (and network) load. + ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` + // concurrentRSSyncs is the number of replica sets that are allowed to sync + // concurrently. Larger number = more responsive replica management, but more + // CPU (and network) load. + ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` + // concurrentRCSyncs is the number of replication controllers that are + // allowed to sync concurrently. Larger number = more responsive replica + // management, but more CPU (and network) load. + ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` + // concurrentResourceQuotaSyncs is the number of resource quotas that are + // allowed to sync concurrently. Larger number = more responsive quota + // management, but more CPU (and network) load. + ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` + // concurrentDeploymentSyncs is the number of deployment objects that are + // allowed to sync concurrently. Larger number = more responsive deployments, + // but more CPU (and network) load. + ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` + // concurrentDaemonSetSyncs is the number of daemonset objects that are + // allowed to sync concurrently. Larger number = more responsive daemonset, + // but more CPU (and network) load. + ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` + // concurrentJobSyncs is the number of job objects that are + // allowed to sync concurrently. Larger number = more responsive jobs, + // but more CPU (and network) load. + ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` + // concurrentNamespaceSyncs is the number of namespace objects that are + // allowed to sync concurrently. + ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` + // concurrentSATokenSyncs is the number of service account token syncing operations + // that will be done concurrently. + ConcurrentSATokenSyncs int32 `json:"concurrentSATokenSyncs"` + // lookupCacheSizeForRC is the size of lookup cache for replication controllers. + // Larger number = more responsive replica management, but more MEM load. + LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"` + // lookupCacheSizeForRS is the size of lookup cache for replicatsets. + // Larger number = more responsive replica management, but more MEM load. + LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"` + // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets. + // Larger number = more responsive daemonset, but more MEM load. 
+ LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"` + // serviceSyncPeriod is the period for syncing services with their external + // load balancers. + ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"` + // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer + // periods will result in fewer calls to cloud provider, but may delay addition + // of new nodes to cluster. + NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"` + // resourceQuotaSyncPeriod is the period for syncing quota usage status + // in the system. + ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"` + // namespaceSyncPeriod is the period for syncing namespace life-cycle + // updates. + NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"` + // pvClaimBinderSyncPeriod is the period for syncing persistent volumes + // and persistent volume claims. + PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"` + // minResyncPeriod is the resync period in reflectors; will be random between + // minResyncPeriod and 2*minResyncPeriod. + MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"` + // terminatedPodGCThreshold is the number of terminated pods that can exist + // before the terminated pod garbage collector starts deleting terminated pods. + // If <= 0, the terminated pod garbage collector is disabled. + TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` + // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of + // pods in horizontal pod autoscaler. + HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"` + // deploymentControllerSyncPeriod is the period for syncing the deployments. + DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"` + // podEvictionTimeout is the grace period for deleting pods on failed nodes. + PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"` + // deletingPodsQps is the number of nodes per second on which pods are deleted in + // case of node failure. + DeletingPodsQps float32 `json:"deletingPodsQps"` + // deletingPodsBurst is the number of nodes on which pods can be deleted in a burst in + // case of node failure. For more details look into RateLimiter. + DeletingPodsBurst int32 `json:"deletingPodsBurst"` + // nodeMonitorGracePeriod is the amount of time which we allow a running node to be + // unresponsive before marking it unhealthy. Must be N times more than kubelet's + // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet + // to post node status. + NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"` + // registerRetryCount is the number of retries for initial node registration. + // Retry interval equals node-sync-period. + RegisterRetryCount int32 `json:"registerRetryCount"` + // nodeStartupGracePeriod is the amount of time which we allow a starting node to + // be unresponsive before marking it unhealthy. + NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"` + // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. + NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"` + // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key + // used to sign service account tokens. 
+ ServiceAccountKeyFile string `json:"serviceAccountKeyFile"` + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling bool `json:"enableProfiling"` + // clusterName is the instance prefix for the cluster. + ClusterName string `json:"clusterName"` + // clusterCIDR is CIDR Range for Pods in cluster. + ClusterCIDR string `json:"clusterCIDR"` + // serviceCIDR is CIDR Range for Services in cluster. + ServiceCIDR string `json:"serviceCIDR"` + // NodeCIDRMaskSize is the mask size for node cidr in cluster. + NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` + // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if + // ConfigureCloudRoutes is true, to be set on the cloud provider. + AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` + // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs + // to be configured on the cloud provider. + ConfigureCloudRoutes bool `json:"configureCloudRoutes"` + // rootCAFile is the root certificate authority will be included in service + // account's token secret. This must be a valid PEM-encoded CA bundle. + RootCAFile string `json:"rootCAFile"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. + KubeAPIQPS float32 `json:"kubeAPIQPS"` + // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. + KubeAPIBurst int32 `json:"kubeAPIBurst"` + // leaderElection defines the configuration of leader election client. + LeaderElection LeaderElectionConfiguration `json:"leaderElection"` + // volumeConfiguration holds configuration for volume related features. + VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` + // How long to wait between starting controller managers + ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"` + // enables the generic garbage collector. MUST be synced with the + // corresponding flag of the kube-apiserver. WARNING: the generic garbage + // collector is an alpha feature. + EnableGarbageCollector bool `json:"enableGarbageCollector"` +} + +// VolumeConfiguration contains *all* enumerated flags meant to configure all volume +// plugins. From this config, the controller-manager binary will create many instances of +// volume.VolumeConfig, each containing only the configuration needed for that plugin which +// are then passed to the appropriate plugin. The ControllerManager binary is the only part +// of the code which knows what plugins are supported and which flags correspond to each plugin. +type VolumeConfiguration struct { + // enableHostPathProvisioning enables HostPath PV provisioning when running without a + // cloud provider. This allows testing and development of provisioning features. HostPath + // provisioning is not supported in any way, won't work in a multi-node cluster, and + // should not be used for anything other than testing or development. + EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"` + // enableDynamicProvisioning enables the provisioning of volumes when running within an environment + // that supports dynamic provisioning. Defaults to true. + EnableDynamicProvisioning bool `json:"enableDynamicProvisioning"` + // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins. 
+ PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"` + // volumePluginDir is the full path of the directory in which the flex + // volume plugin should search for additional third party volume plugins + FlexVolumePluginDir string `json:"flexVolumePluginDir"` +} + +type PersistentVolumeRecyclerConfiguration struct { + // maximumRetry is number of retries the PV recycler will execute on failure to recycle + // PV. + MaximumRetry int32 `json:"maximumRetry"` + // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler + // pod. + MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"` + // podTemplateFilePathNFS is the file path to a pod definition used as a template for + // NFS persistent volume recycling + PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"` + // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds + // for an NFS scrubber pod. + IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"` + // podTemplateFilePathHostPath is the file path to a pod definition used as a template for + // HostPath persistent volume recycling. This is for development and testing only and + // will not work in a multi-node cluster. + PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"` + // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath + // Recycler pod. This is for development and testing only and will not work in a multi-node + // cluster. + MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"` + // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds + // for a HostPath scrubber pod. This is for development and testing only and will not work + // in a multi-node cluster. + IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go new file mode 100644 index 00000000..fd607760 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go @@ -0,0 +1,182 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, + Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, + Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, + Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, + Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, + Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { + SetDefaults_KubeProxyConfiguration(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR + out.HealthzBindAddress = in.HealthzBindAddress + out.HealthzPort = in.HealthzPort + out.HostnameOverride = in.HostnameOverride + out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit + out.IPTablesSyncPeriod = in.IPTablesSyncPeriod + out.KubeconfigPath = in.KubeconfigPath + out.MasqueradeAll = in.MasqueradeAll + out.Master = in.Master + out.OOMScoreAdj = in.OOMScoreAdj + out.Mode = componentconfig.ProxyMode(in.Mode) + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + out.UDPIdleTimeout = in.UDPIdleTimeout + out.ConntrackMax = in.ConntrackMax + out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout + return nil +} + +func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) +} + +func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR + out.HealthzBindAddress = in.HealthzBindAddress + out.HealthzPort = in.HealthzPort + out.HostnameOverride = in.HostnameOverride + out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit + out.IPTablesSyncPeriod = in.IPTablesSyncPeriod + out.KubeconfigPath = in.KubeconfigPath + out.MasqueradeAll = in.MasqueradeAll + out.Master = in.Master + out.OOMScoreAdj = in.OOMScoreAdj + out.Mode = ProxyMode(in.Mode) + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + out.UDPIdleTimeout = in.UDPIdleTimeout + out.ConntrackMax = in.ConntrackMax + out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout + return nil +} + +func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { + 
return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { + SetDefaults_KubeSchedulerConfiguration(in) + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Port = int32(in.Port) + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + if err := api.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = int32(in.KubeAPIBurst) + out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Port = int(in.Port) + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + if err := api.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { + return err + } + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = int(in.KubeAPIBurst) + out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { + return err + } + return nil +} + +func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { + SetDefaults_LeaderElectionConfiguration(in) + if err := api.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return nil +} + +func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in 
*LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) +} + +func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + if err := api.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return nil +} + +func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go new file mode 100644 index 00000000..f4f9fc93 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_KubeProxyConfiguration, + DeepCopy_v1alpha1_KubeSchedulerConfiguration, + DeepCopy_v1alpha1_LeaderElectionConfiguration, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_v1alpha1_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.BindAddress = in.BindAddress + out.ClusterCIDR = in.ClusterCIDR + out.HealthzBindAddress = in.HealthzBindAddress + out.HealthzPort = in.HealthzPort + out.HostnameOverride = in.HostnameOverride + if in.IPTablesMasqueradeBit != nil { + in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit + *out = new(int32) + **out = *in + } else { + out.IPTablesMasqueradeBit = nil + } + if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { + return err + } + out.KubeconfigPath = in.KubeconfigPath + out.MasqueradeAll = in.MasqueradeAll + out.Master = in.Master + if in.OOMScoreAdj != nil { + in, out := in.OOMScoreAdj, &out.OOMScoreAdj + *out = new(int32) + **out = *in + } else { + out.OOMScoreAdj = nil + } + out.Mode = in.Mode + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { + return err + } + out.ConntrackMax = in.ConntrackMax + if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Port = in.Port + out.Address = in.Address + out.AlgorithmProvider = in.AlgorithmProvider + out.PolicyConfigFile = in.PolicyConfigFile + if in.EnableProfiling != nil { + in, out := in.EnableProfiling, &out.EnableProfiling + *out = new(bool) + **out = *in + } else { + out.EnableProfiling = nil + } + out.ContentType = in.ContentType + out.KubeAPIQPS = in.KubeAPIQPS + out.KubeAPIBurst = in.KubeAPIBurst + out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains + if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { + if in.LeaderElect != nil { + in, out := in.LeaderElect, &out.LeaderElect + *out = new(bool) + **out = *in + } else { + out.LeaderElect = nil + } + if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go new file mode 100644 index 00000000..bab6bb3e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -0,0 +1,114 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/kubelet/qos" + "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) { + scheme.AddDefaultingFuncs( + SetDefaults_KubeProxyConfiguration, + SetDefaults_KubeSchedulerConfiguration, + SetDefaults_LeaderElectionConfiguration, + ) +} + +func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { + if obj.BindAddress == "" { + obj.BindAddress = "0.0.0.0" + } + if obj.HealthzPort == 0 { + obj.HealthzPort = 10249 + } + if obj.HealthzBindAddress == "" { + obj.HealthzBindAddress = "127.0.0.1" + } + if obj.OOMScoreAdj == nil { + temp := int32(qos.KubeProxyOOMScoreAdj) + obj.OOMScoreAdj = &temp + } + if obj.ResourceContainer == "" { + obj.ResourceContainer = "/kube-proxy" + } + if obj.IPTablesSyncPeriod.Duration == 0 { + obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second} + } + zero := unversioned.Duration{} + if obj.UDPIdleTimeout == zero { + obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond} + } + if obj.ConntrackMax == 0 { + obj.ConntrackMax = 256 * 1024 // 4x default (64k) + } + if obj.IPTablesMasqueradeBit == nil { + temp := int32(14) + obj.IPTablesMasqueradeBit = &temp + } + if obj.ConntrackTCPEstablishedTimeout == zero { + obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) + } +} + +func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { + if obj.Port == 0 { + obj.Port = ports.SchedulerPort + } + if obj.Address == "" { + obj.Address = "0.0.0.0" + } + if obj.AlgorithmProvider == "" { + obj.AlgorithmProvider = "DefaultProvider" + } + if obj.ContentType == "" { + obj.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.KubeAPIQPS == 0 { + obj.KubeAPIQPS = 50.0 + } + if obj.KubeAPIBurst == 0 { + obj.KubeAPIBurst = 100 + } + if obj.SchedulerName == "" { + obj.SchedulerName = api.DefaultSchedulerName + } + if obj.HardPodAffinitySymmetricWeight == 0 { + obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight + } + if obj.FailureDomains == "" { + obj.FailureDomains = api.DefaultFailureDomains + } +} + +func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { + zero := unversioned.Duration{} + if obj.LeaseDuration == zero { + obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second} + } + if obj.RenewDeadline == zero { + obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second} + } + if obj.RetryPeriod == zero { + obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second} + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go new file mode 100644 index 00000000..65a03a20 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +genconversion=true +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go new file mode 100644 index 00000000..d74effb7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name used in this package +const GroupName = "componentconfig" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + addDefaultingFuncs(scheme) +} + +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeProxyConfiguration{}, + &KubeSchedulerConfiguration{}, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go new file mode 100644 index 00000000..2ae65d87 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "k8s.io/kubernetes/pkg/api/unversioned" + +type KubeProxyConfiguration struct { + unversioned.TypeMeta + + // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 + // for all interfaces) + BindAddress string `json:"bindAddress"` + // clusterCIDR is the CIDR range of the pods in the cluster.
It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string `json:"clusterCIDR"` + // healthzBindAddress is the IP address for the health check server to serve on, + // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) + HealthzBindAddress string `json:"healthzBindAddress"` + // healthzPort is the port to bind the health check server. Use 0 to disable. + HealthzPort int32 `json:"healthzPort"` + // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. + HostnameOverride string `json:"hostnameOverride"` + // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using + // the pure iptables proxy mode. Values must be within the range [0, 31]. + IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` + // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` + // kubeconfigPath is the path to the kubeconfig file with authorization information (the + // master location is set by the master flag). + KubeconfigPath string `json:"kubeconfigPath"` + // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + MasqueradeAll bool `json:"masqueradeAll"` + // master is the address of the Kubernetes API server (overrides any value in kubeconfig) + Master string `json:"master"` + // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within + // the range [-1000, 1000] + OOMScoreAdj *int32 `json:"oomScoreAdj"` + // mode specifies which proxy mode to use. + Mode ProxyMode `json:"mode"` + // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed + // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. + PortRange string `json:"portRange"` + // resourceContainer is the absolute name of the resource-only container to create and run + // the Kube-proxy in (Default: /kube-proxy). + ResourceContainer string `json:"resourceContainer"` + // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). + // Must be greater than 0. Only applicable for proxyMode=userspace. + UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` + // conntrackMax is the maximum number of NAT connections to track (0 to leave as-is) + ConntrackMax int32 `json:"conntrackMax"` + // conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open + // (e.g. '250ms', '2s'). Must be greater than 0. Only applicable when proxyMode is userspace. + ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` +} + +// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' +// (experimental). If blank, look at the Node object on the Kubernetes API and respect the +// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the +// best-available proxy (currently userspace, but may change in future versions). If the +// iptables proxy is selected, regardless of how, but the system's kernel or iptables +// versions are insufficient, this always falls back to the userspace proxy.
+type ProxyMode string + +const ( + ProxyModeUserspace ProxyMode = "userspace" + ProxyModeIPTables ProxyMode = "iptables" +) + +type KubeSchedulerConfiguration struct { + unversioned.TypeMeta + + // port is the port that the scheduler's http service runs on. + Port int `json:"port"` + // address is the IP address to serve on. + Address string `json:"address"` + // algorithmProvider is the scheduling algorithm provider to use. + AlgorithmProvider string `json:"algorithmProvider"` + // policyConfigFile is the filepath to the scheduler policy configuration. + PolicyConfigFile string `json:"policyConfigFile"` + // enableProfiling enables profiling via web interface. + EnableProfiling *bool `json:"enableProfiling"` + // contentType is contentType of requests sent to apiserver. + ContentType string `json:"contentType"` + // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. + KubeAPIQPS float32 `json:"kubeAPIQPS"` + // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. + KubeAPIBurst int `json:"kubeAPIBurst"` + // schedulerName is name of the scheduler, used to select which pods + // will be processed by this scheduler, based on pod's annotation with + // key 'scheduler.alpha.kubernetes.io/name'. + SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` + // leaderElection defines the configuration of leader election client. + LeaderElection LeaderElectionConfiguration `json:"leaderElection"` +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. + LeaderElect *bool `json:"leaderElect"` + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration unversioned.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + RenewDeadline unversioned.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. 
+ RetryPeriod unversioned.Duration `json:"retryPeriod"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go new file mode 100644 index 00000000..d0405c37 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go @@ -0,0 +1,746 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "net" + "reflect" + "regexp" + "strconv" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" + apivalidation "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/labels" + psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/validation" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateThirdPartyResource(update)...) + return allErrs +} + +func ValidateThirdPartyResourceName(name string, prefix bool) []string { + // Make sure it's a valid DNS subdomain + if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 { + return msgs + } + + // Make sure it's at least three segments (kind + two-segment group name) + if !prefix { + parts := strings.Split(name, ".") + if len(parts) < 3 { + return []string{"must be at least three segments long: <kind>.<domain>.<tld>"} + } + } + + return nil +} + +func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) + + versions := sets.String{} + for ix := range obj.Versions { + version := &obj.Versions[ix] + if len(version.Name) == 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty")) + } else { + for _, msg := range validation.IsDNS1123Label(version.Name) { + allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg)) + } + } + if versions.Has(version.Name) { + allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version)) + } + versions.Insert(version.Name) + } + return allErrs +} + +// ValidateDaemonSet tests if required fields in the DaemonSet are set.
+func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. +func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) + return allErrs +} + +// validateDaemonSetStatus validates a DaemonSetStatus +func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...) + return allErrs +} + +// ValidateDaemonSetStatusUpdate tests if required fields in the DaemonSet Status section are set. +func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...) + return allErrs +} + +// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. +func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + + selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) + if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) + } + if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset.")) + } + + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) + // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. + allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...) + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + return allErrs +} + +// ValidateDaemonSetName can be used to check whether the given daemon set name is valid.
+// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain + +// Validates that the given name can be used as a deployment name. +var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain + +func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if intOrPercent.Type == intstr.String { + if !validation.IsValidPercent(intOrPercent.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%')")) + } + } else if intOrPercent.Type == intstr.Int { + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...) + } + return allErrs +} + +func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) { + if intOrStringValue.Type != intstr.String || !validation.IsValidPercent(intOrStringValue.StrVal) { + return 0, false + } + value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1]) + return value, true +} + +func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int { + value, isPercent := getPercentValue(intOrStringValue) + if isPercent { + return value + } + return intOrStringValue.IntValue() +} + +func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + value, isPercent := getPercentValue(intOrStringValue) + if !isPercent || value <= 100 { + return nil + } + allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%")) + return allErrs +} + +func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) + if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 { + // Both MaxSurge and MaxUnavailable cannot be zero. + allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0")) + } + // Validate that MaxUnavailable is not more than 100%. + allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) + return allErrs +} + +func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch strategy.Type { + case extensions.RecreateDeploymentStrategyType: + if strategy.RollingUpdate != nil { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'"))) + } + case extensions.RollingUpdateDeploymentStrategyType: + // This should never happen since it's set and checked in defaults.go + if strategy.RollingUpdate == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil")) + } else { + allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...) 
+ } + default: + validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)} + allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues)) + } + return allErrs +} + +func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + v := rollback.Revision + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...) + return allErrs +} + +// Validates given deployment spec. +func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) + } + } + + selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) + } + + allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + if spec.RevisionHistoryLimit != nil { + // zero is a valid RevisionHistoryLimit + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...) + } + if spec.RollbackTo != nil { + allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...) + } + return allErrs +} + +// Validates given deployment status. +func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...) + return allErrs +} + +func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...) 
+ return allErrs +} + +func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, field.NewPath("status"))...) + return allErrs +} + +func ValidateDeployment(obj *extensions.Deployment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList { + allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations")) + if len(obj.Name) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required")) + } + allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...) + return allErrs +} + +func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList { + return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) +} + +func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList { + return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata")) +} + +// ValidateIngress tests if required fields in the Ingress are set. +func ValidateIngress(ingress *extensions.Ingress) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateIngressName validates that the given name can be used as an Ingress name. +var ValidateIngressName = apivalidation.NameIsDNSSubdomain + +func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: Perform a more thorough validation of spec.TLS.Hosts that takes + // the wildcard spec from RFC 6125 into account. + return allErrs +} + +// ValidateIngressSpec tests if required fields in the IngressSpec are set. +func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: Is a default backend mandatory? + if spec.Backend != nil { + allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...) + } else if len(spec.Rules) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified")) + } + if len(spec.Rules) > 0 { + allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...) + } + if len(spec.TLS) > 0 { + allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...) + } + return allErrs +} + +// ValidateIngressUpdate tests if required fields in the Ingress are set. +func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) 
+ return allErrs +} + +// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status. +func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) + return allErrs +} + +func validateIngressRules(IngressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(IngressRules) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + for i, ih := range IngressRules { + if len(ih.Host) > 0 { + // TODO: Ports and ips are allowed in the host part of a url + // according to RFC 3986, consider allowing them. + for _, msg := range validation.IsDNS1123Subdomain(ih.Host) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg)) + } + if isIP := (net.ParseIP(ih.Host) != nil); isIP { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address")) + } + } + allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(0))...) + } + return allErrs +} + +func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ingressRule.HTTP != nil { + allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...) + } + return allErrs +} + +func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(httpIngressRuleValue.Paths) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("paths"), "")) + } + for i, rule := range httpIngressRuleValue.Paths { + if len(rule.Path) > 0 { + if !strings.HasPrefix(rule.Path, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path")) + } + // TODO: More draconian path regex validation. + // Path must be a valid regex. This is the basic requirement. + // In addition to this any characters not allowed in a path per + // RFC 3986 section-3.3 cannot appear as a literal in the regex. + // Consider the example: http://host/valid?#bar, everything after + // the last '/' is a valid regex that matches valid#bar, which + // isn't a valid path, because the path terminates at the first ? + // or #. A more sophisticated form of validation would detect that + // the user is confusing url regexes with path regexes. + _, err := regexp.CompilePOSIX(rule.Path) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex")) + } + } + allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...) + } + return allErrs +} + +// validateIngressBackend tests if a given backend is valid. +func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // All backends must reference a single local service by name, and a single service port by name or number. 
+ if len(backend.ServiceName) == 0 { + return append(allErrs, field.Required(fldPath.Child("serviceName"), "")) + } else { + for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg)) + } + } + if backend.ServicePort.Type == intstr.String { + for _, msg := range validation.IsDNS1123Label(backend.ServicePort.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, msg)) + } + if !validation.IsValidPortName(backend.ServicePort.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, apivalidation.PortNameErrorMsg)) + } + } else if !validation.IsValidPortNum(backend.ServicePort.IntValue()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort, apivalidation.PortRangeErrorMsg)) + } + return allErrs +} + +func ValidateScale(scale *extensions.Scale) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + + if scale.Spec.Replicas < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0")) + } + + return allErrs +} + +// ValidateReplicaSetName can be used to check whether the given ReplicaSet +// name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain + +// ValidateReplicaSet tests if required fields in the ReplicaSet are set. +func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set. +func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set. +func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.Replicas), field.NewPath("status", "replicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.FullyLabeledReplicas), field.NewPath("status", "fullyLabeledReplicas"))...) + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ObservedGeneration), field.NewPath("status", "observedGeneration"))...) + return allErrs +} + +// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set. 
+func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + + if spec.Selector == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) + } else { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) + } + } + + selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) + } else { + allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) + } + return allErrs +} + +// Validates the given template and ensures that it is in accordance with the desired selector and replicas. +func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + if !selector.Empty() { + // Verify that the ReplicaSet selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) + if replicas > 1 { + allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) + } + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if template.Spec.RestartPolicy != api.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + } + } + return allErrs +} + +// ValidatePodSecurityPolicyName can be used to check whether the given +// pod security policy name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidatePodSecurityPolicyName = apivalidation.NameIsDNSSubdomain + +func ValidatePodSecurityPolicy(psp *extensions.PodSecurityPolicy) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...) + return allErrs +} + +func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...) + allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...) + allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...) 
+ allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...) + allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...) + allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) + allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) + + return allErrs +} + +// validatePSPSELinux validates the SELinux fields of PodSecurityPolicy. +func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + // ensure the selinux strategy has a valid rule + supportedSELinuxRules := sets.NewString(string(extensions.SELinuxStrategyMustRunAs), + string(extensions.SELinuxStrategyRunAsAny)) + if !supportedSELinuxRules.Has(string(seLinux.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), seLinux.Rule, supportedSELinuxRules.List())) + } + + return allErrs +} + +// validatePSPRunAsUser validates the RunAsUser fields of PodSecurityPolicy. +func validatePSPRunAsUser(fldPath *field.Path, runAsUser *extensions.RunAsUserStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + // ensure the user strategy has a valid rule + supportedRunAsUserRules := sets.NewString(string(extensions.RunAsUserStrategyMustRunAs), + string(extensions.RunAsUserStrategyMustRunAsNonRoot), + string(extensions.RunAsUserStrategyRunAsAny)) + if !supportedRunAsUserRules.Has(string(runAsUser.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsUser.Rule, supportedRunAsUserRules.List())) + } + + // validate range settings + for idx, rng := range runAsUser.Ranges { + allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) + } + + return allErrs +} + +// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy. +func validatePSPFSGroup(fldPath *field.Path, groupOptions *extensions.FSGroupStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + supportedRules := sets.NewString( + string(extensions.FSGroupStrategyMustRunAs), + string(extensions.FSGroupStrategyRunAsAny), + ) + if !supportedRules.Has(string(groupOptions.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) + } + + for idx, rng := range groupOptions.Ranges { + allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) + } + return allErrs +} + +// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy. +func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *extensions.SupplementalGroupsStrategyOptions) field.ErrorList { + allErrs := field.ErrorList{} + + supportedRules := sets.NewString( + string(extensions.SupplementalGroupsStrategyRunAsAny), + string(extensions.SupplementalGroupsStrategyMustRunAs), + ) + if !supportedRules.Has(string(groupOptions.Rule)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) + } + + for idx, rng := range groupOptions.Ranges { + allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) + } + return allErrs +} + +// validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy. 
+func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.FSType) field.ErrorList { + allErrs := field.ErrorList{} + allowed := psputil.GetAllFSTypesAsSet() + // add in the * value since that is a pseudo type that is not included by default + allowed.Insert(string(extensions.All)) + for _, v := range volumes { + if !allowed.Has(string(v)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List())) + } + } + + return allErrs +} + +// validateIDRanges ensures the range is valid. +func validateIDRanges(fldPath *field.Path, rng extensions.IDRange) field.ErrorList { + allErrs := field.ErrorList{} + + // if 0 <= Min <= Max then we do not need to validate max. It is always greater than or + // equal to 0 and Min. + if rng.Min < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be negative")) + } + if rng.Max < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("max"), rng.Max, "max cannot be negative")) + } + if rng.Min > rng.Max { + allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be greater than max")) + } + + return allErrs +} + +// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops. +func validatePSPCapsAgainstDrops(requiredDrops []api.Capability, capsToCheck []api.Capability, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requiredDrops == nil { + return allErrs + } + for _, cap := range capsToCheck { + if hasCap(cap, requiredDrops) { + allErrs = append(allErrs, field.Invalid(fldPath, cap, + fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) + } + } + return allErrs +} + +// hasCap checks for needle in haystack. +func hasCap(needle api.Capability, haystack []api.Capability) bool { + for _, c := range haystack { + if needle == c { + return true + } + } + return false +} + +// ValidatePodSecurityPolicyUpdate validates a PSP for updates. +func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *extensions.PodSecurityPolicy) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&old.ObjectMeta, &new.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateNetworkPolicyName can be used to check whether the given networkpolicy +// name is valid. +func ValidateNetworkPolicyName(name string, prefix bool) []string { + return apivalidation.NameIsDNSSubdomain(name, prefix) +} + +// ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set. +func ValidateNetworkPolicySpec(spec *extensions.NetworkPolicySpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...) + + // Validate ingress rules. + for _, i := range spec.Ingress { + // TODO: Update From to be a pointer to slice as soon as auto-generation supports it. + for _, f := range i.From { + numFroms := 0 + if f.PodSelector != nil { + numFroms++ + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.PodSelector, fldPath.Child("podSelector"))...) 
+ } + if f.NamespaceSelector != nil { + if numFroms > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, "may not specify more than 1 from type")) + } else { + numFroms++ + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.NamespaceSelector, fldPath.Child("namespaces"))...) + } + } + + if numFroms == 0 { + // At least one of PodSelector and NamespaceSelector must be defined. + allErrs = append(allErrs, field.Required(fldPath, "must specify a from type")) + } + } + } + return allErrs +} + +// ValidateNetworkPolicy validates a networkpolicy. +func ValidateNetworkPolicy(np *extensions.NetworkPolicy) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid. +func ValidateNetworkPolicyUpdate(update, old *extensions.NetworkPolicy) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) + if !reflect.DeepEqual(update.Spec, old.Spec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden.")) + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go new file mode 100644 index 00000000..390e4b4a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go @@ -0,0 +1,101 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package policy + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + intstr "k8s.io/kubernetes/pkg/util/intstr" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_policy_PodDisruptionBudget, + DeepCopy_policy_PodDisruptionBudgetList, + DeepCopy_policy_PodDisruptionBudgetSpec, + DeepCopy_policy_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
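Stepping back briefly to the NetworkPolicy validation earlier in this hunk: each entry in an ingress rule's from list must set exactly one of podSelector or namespaceSelector — neither yields a Required error, both yields a Forbidden error. A hedged sketch of the empty-entry case (the NetworkPolicyIngressRule and NetworkPolicyPeer type names are assumed from context; the test would live alongside the validation package):

func TestNetworkPolicyFromNeedsASelector(t *testing.T) {
	spec := extensions.NetworkPolicySpec{
		Ingress: []extensions.NetworkPolicyIngressRule{
			// a 'from' entry with neither podSelector nor namespaceSelector
			{From: []extensions.NetworkPolicyPeer{{}}},
		},
	}
	errs := ValidateNetworkPolicySpec(&spec, field.NewPath("spec"))
	if len(errs) != 1 || errs[0].Type != field.ErrorTypeRequired {
		t.Fatalf("expected a single Required error for the empty 'from' entry, got %v", errs)
	}
}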
+ panic(err) + } +} + +func DeepCopy_policy_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(in)) + for i := range in { + if err := DeepCopy_policy_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { + return err + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + return nil +} + +func DeepCopy_policy_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go new file mode 100644 index 00000000..7882a0c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go @@ -0,0 +1,129 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
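As the package comment above notes, the install package exists purely for its registration side effects. A minimal illustration (editorial, not part of the patch) of how a binary typically pulls the policy group into the API machinery is a blank import:

import (
	// Registering the policy group, its v1alpha1 version, and its RESTMapper
	// happens as a side effect of this package's init().
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
)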
+package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/policy/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/policy" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", policy.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. 
+func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(policy.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + policy.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go new file mode 100644 index 00000000..76ea1a55 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. + addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go new file mode 100644 index 00000000..2ecf41bc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *unversioned.LabelSelector `json:"selector,omitempty"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Whether or not a disruption is currently allowed. + PodDisruptionAllowed bool `json:"disruptionAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods"` +} + +// +genclient=true,noMethods=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty"` + Items []PodDisruptionBudget `json:"items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go new file mode 100644 index 00000000..23aaa9a3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go @@ -0,0 +1,183 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
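A quick editorial illustration of the MinAvailable field in the internal types just shown: intstr.IntOrString lets a PodDisruptionBudget express its floor either as an absolute pod count or as a percentage string. A small sketch, assuming imports of "k8s.io/kubernetes/pkg/apis/policy" and "k8s.io/kubernetes/pkg/util/intstr":

func examplePDBSpecs() (policy.PodDisruptionBudgetSpec, policy.PodDisruptionBudgetSpec) {
	// At least 3 pods must remain available during a voluntary disruption...
	absolute := policy.PodDisruptionBudgetSpec{MinAvailable: intstr.FromInt(3)}
	// ...or, equivalently, a percentage of the expected pods.
	percent := policy.PodDisruptionBudgetSpec{MinAvailable: intstr.FromString("80%")}
	return absolute, percent
}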
+ +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + policy "k8s.io/kubernetes/pkg/apis/policy" + conversion "k8s.io/kubernetes/pkg/conversion" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget, + Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget, + Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, + Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList, + Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, + Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec, + Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, + Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]policy.PodDisruptionBudget, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { + return err + } + out.Selector = in.Selector + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s 
conversion.Scope) error { + if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { + return err + } + out.Selector = in.Selector + return nil +} + +func Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go new file mode 100644 index 00000000..74680aff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go @@ -0,0 +1,102 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" + intstr "k8s.io/kubernetes/pkg/util/intstr" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_PodDisruptionBudget, + DeepCopy_v1alpha1_PodDisruptionBudgetList, + DeepCopy_v1alpha1_PodDisruptionBudgetSpec, + DeepCopy_v1alpha1_PodDisruptionBudgetStatus, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
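The generated conversion functions above are registered with api.Scheme in init(), so callers normally go through the scheme rather than invoking them directly. A hedged sketch of that entry point (function name is illustrative; assumes imports of the api, policy, and v1alpha1 packages):

func toInternalPDB(in *v1alpha1.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) {
	out := &policy.PodDisruptionBudget{}
	// Scheme.Convert dispatches to the Convert_v1alpha1_* functions
	// registered by the init() above.
	if err := api.Scheme.Convert(in, out); err != nil {
		return nil, err
	}
	return out, nil
}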
+ panic(err) + } +} + +func DeepCopy_v1alpha1_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { + if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { + return err + } + if in.Selector != nil { + in, out := in.Selector, &out.Selector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.Selector = nil + } + return nil +} + +func DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { + out.PodDisruptionAllowed = in.PodDisruptionAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go new file mode 100644 index 00000000..5cb716c2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package policy is for any kind of policy object. Suitable examples, even if +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// NetworkPolicy, etc. +// +genconversion=true +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go new file mode 100644 index 00000000..867a6b0a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go @@ -0,0 +1,903 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto + + It has these top-level messages: + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (m *PodDisruptionBudget) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudget) ProtoMessage() {} + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (m *PodDisruptionBudgetList) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetList) ProtoMessage() {} + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (m *PodDisruptionBudgetSpec) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (m *PodDisruptionBudgetStatus) String() string { return proto.CompactTextString(m) } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} + +func init() { + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetStatus") +} +func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 
+ return i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) + n5, err := m.MinAvailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.PodDisruptionAllowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PodDisruptionBudget) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*PodDisruptionBudgetSpec) Size() (n int) { + var l int + _ = l + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + var l int + _ = l + n += 2 + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PodDisruptionBudget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionAllowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PodDisruptionAllowed = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExpectedPods |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto new file mode 100644 index 00000000..730a0a3e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
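Editorial note on the hand-rolled helpers in the generated marshaller above: encodeVarintGenerated and sovGenerated implement standard protobuf base-128 varints, where each byte carries 7 bits of the value and the high bit flags continuation. A small worked example against the code as shown (same package, since the helpers are unexported):

func exampleVarint() {
	// 300 = 0b1_0010_1100: the low 7 bits (0x2C) are written first with the
	// continuation bit set (0xAC), then the remaining bits (0x02).
	buf := make([]byte, 2)
	n := encodeVarintGenerated(buf, 0, 300) // n == 2, buf == []byte{0xAC, 0x02}

	// sovGenerated computes the same encoded length without writing anything.
	fmt.Println(n, sovGenerated(300), buf) // 2 2 [172 2]
}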
+ +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.policy.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Whether or not a disruption is currently allowed. + optional bool disruptionAllowed = 1; + + // current number of healthy pods + optional int32 currentHealthy = 2; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 3; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 4; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go new file mode 100644 index 00000000..a6a94d96 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) + /* + addDefaultingFuncs(scheme) + addConversionFuncs(scheme) + */ +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &v1.ListOptions{}, + &v1.DeleteOptions{}, + ) + // Add the watch version that applies + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go new file mode 100644 index 00000000..1f3265ae --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // The minimum number of pods that must be available simultaneously. This + // can be either an integer or a string specifying a percentage, e.g. "28%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Whether or not a disruption is currently allowed. 
+ PodDisruptionAllowed bool `json:"disruptionAllowed" protobuf:"varint,1,opt,name=disruptionAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,2,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,3,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"` +} + +// +genclient=true,noMethods=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + unversioned.TypeMeta `json:",inline"` + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + unversioned.TypeMeta `json:",inline"` + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..8ca1782f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,70 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "The minimum number of pods that must be available simultaneously. This can be either an integer or a string specifying a percentage, e.g. \"28%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "disruptionAllowed": "Whether or not a disruption is currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go new file mode 100644 index 00000000..5e9339a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go @@ -0,0 +1,274 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
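// NOTE: illustrative sketch only, not part of the vendored upstream sources.
// It shows how the policy/v1alpha1 PodDisruptionBudget types introduced above
// might be constructed: MinAvailable accepts either an absolute count
// (intstr.FromInt) or a percentage string (intstr.FromString), as described in
// the spec comments. The object name, namespace, and "app": "web" selector are
// made up for the example.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	policyv1alpha1 "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	pdb := policyv1alpha1.PodDisruptionBudget{
		ObjectMeta: v1.ObjectMeta{Name: "web-pdb", Namespace: "default"},
		Spec: policyv1alpha1.PodDisruptionBudgetSpec{
			// Keep at least 80% of the selected pods available at all times.
			MinAvailable: intstr.FromString("80%"),
			// Only pods matching this label query count against the budget.
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"app": "web"},
			},
		},
	}
	fmt.Printf("%s/%s requires minAvailable=%s\n", pdb.Namespace, pdb.Name, pdb.Spec.MinAvailable.String())
}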
+ +package rbac + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_rbac_ClusterRole, + DeepCopy_rbac_ClusterRoleBinding, + DeepCopy_rbac_ClusterRoleBindingList, + DeepCopy_rbac_ClusterRoleList, + DeepCopy_rbac_PolicyRule, + DeepCopy_rbac_Role, + DeepCopy_rbac_RoleBinding, + DeepCopy_rbac_RoleBindingList, + DeepCopy_rbac_RoleList, + DeepCopy_rbac_Subject, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. + panic(err) + } +} + +func DeepCopy_rbac_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_rbac_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_rbac_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(in)) + for i := range in { + if err := DeepCopy_rbac_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRole, len(in)) + for i := range in { + if err := DeepCopy_rbac_ClusterRole(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { + if in.Verbs != nil { + in, out := in.Verbs, &out.Verbs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Verbs = nil + } + if in.AttributeRestrictions == nil { + out.AttributeRestrictions = nil + } else if newVal, err 
:= c.DeepCopy(in.AttributeRestrictions); err != nil { + return err + } else { + out.AttributeRestrictions = newVal.(runtime.Object) + } + if in.APIGroups != nil { + in, out := in.APIGroups, &out.APIGroups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.APIGroups = nil + } + if in.Resources != nil { + in, out := in.Resources, &out.Resources + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Resources = nil + } + if in.ResourceNames != nil { + in, out := in.ResourceNames, &out.ResourceNames + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.ResourceNames = nil + } + if in.NonResourceURLs != nil { + in, out := in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.NonResourceURLs = nil + } + return nil +} + +func DeepCopy_rbac_Role(in Role, out *Role, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_rbac_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_rbac_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]RoleBinding, len(in)) + for i := range in { + if err := DeepCopy_rbac_RoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Role, len(in)) + for i := range in { + if err := DeepCopy_rbac_Role(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_rbac_Subject(in Subject, out *Subject, c *conversion.Cloner) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go new 
file mode 100644 index 00000000..15f91da2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package rbac diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go new file mode 100644 index 00000000..8cac247f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go @@ -0,0 +1,130 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the rbac API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const importPrefix = "k8s.io/kubernetes/pkg/apis/rbac" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", rbac.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + glog.V(4).Infof("%v", err) + return + } + if err := enableVersions(externalVersions); err != nil { + glog.V(4).Infof("%v", err) + return + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. +func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...)
+ preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + api.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + rootScoped := sets.NewString( + "ClusterRole", + "ClusterRoleBinding", + ) + + ignoredKinds := sets.NewString() + + return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +// interfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1alpha1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + g, _ := registered.Group(rbac.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + rbac.AddToScheme(api.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1alpha1.SchemeGroupVersion: + v1alpha1.AddToScheme(api.Scheme) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go new file mode 100644 index 00000000..58464d74 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go @@ -0,0 +1,64 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) unversioned.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) unversioned.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func AddToScheme(scheme *runtime.Scheme) { + // Add the API to Scheme. 
+ addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &api.ListOptions{}, + &api.DeleteOptions{}, + &api.ExportOptions{}, + ) + versioned.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go new file mode 100644 index 00000000..a35eb7db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go @@ -0,0 +1,176 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + UserAll = "*" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + AttributeRestrictions runtime.Object + // APIGroups is the name of the APIGroup that contains the resources. + // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. + APIGroups []string + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + NonResourceURLs []string +} + +// Subject contains a reference to the object or user identities a role binding applies to. 
This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + Kind string + // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is + // expected to be the API version of this API group. For example "rbac/v1alpha1". + APIVersion string + // Name of the object being referenced. + Name string + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + Namespace string +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + unversioned.TypeMeta + // Standard object's metadata. + api.ObjectMeta + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + unversioned.TypeMeta + api.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef api.ObjectReference +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + unversioned.TypeMeta + // Standard object's metadata. + unversioned.ListMeta + + // Items is a list of roleBindings + Items []RoleBinding +} + +// RoleList is a collection of Roles +type RoleList struct { + unversioned.TypeMeta + // Standard object's metadata. + unversioned.ListMeta + + // Items is a list of roles + Items []Role +} + +// +genclient=true,nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + unversioned.TypeMeta + // Standard object's metadata. + api.ObjectMeta + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule +} + +// +genclient=true,nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + unversioned.TypeMeta + // Standard object's metadata. + api.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef api.ObjectReference +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + unversioned.TypeMeta + // Standard object's metadata.
+ unversioned.ListMeta + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + unversioned.TypeMeta + // Standard object's metadata. + unversioned.ListMeta + + // Items is a list of ClusterRoles + Items []ClusterRole +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go new file mode 100644 index 00000000..f176aa09 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go @@ -0,0 +1,536 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, + Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, + Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, + Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, + Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, + Convert_v1alpha1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1alpha1_Role, + Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, + Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, + Convert_v1alpha1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1alpha1_RoleList, + Convert_v1alpha1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1alpha1_Subject, + ); err != nil { + // if one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} + +func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbac.PolicyRule, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRole, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, 
&out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = in.Verbs + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { + return err + } + out.APIGroups = in.APIGroups + out.Resources = in.Resources + out.ResourceNames = in.ResourceNames + out.NonResourceURLs = in.NonResourceURLs + return nil +} + +func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Verbs = in.Verbs + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { + return err + } + out.APIGroups = in.APIGroups + out.Resources = in.Resources + out.ResourceNames = in.ResourceNames + out.NonResourceURLs = in.NonResourceURLs + return nil +} + +func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) +} + +func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]rbac.PolicyRule, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) +} + +func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { + return err + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.RoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.Role, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Role_To_rbac_Role(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := Convert_rbac_Role_To_v1alpha1_Role(&(*in)[i], 
&(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) +} + +func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + return autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s) +} + +func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + return autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go new file mode 100644 index 00000000..f898a434 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go @@ -0,0 +1,271 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + if err := api.Scheme.AddGeneratedDeepCopyFuncs( + DeepCopy_v1alpha1_ClusterRole, + DeepCopy_v1alpha1_ClusterRoleBinding, + DeepCopy_v1alpha1_ClusterRoleBindingList, + DeepCopy_v1alpha1_ClusterRoleList, + DeepCopy_v1alpha1_PolicyRule, + DeepCopy_v1alpha1_Role, + DeepCopy_v1alpha1_RoleBinding, + DeepCopy_v1alpha1_RoleBindingList, + DeepCopy_v1alpha1_RoleList, + DeepCopy_v1alpha1_Subject, + ); err != nil { + // if one of the deep copy functions is malformed, detect it immediately. 
+ panic(err) + } +} + +func DeepCopy_v1alpha1_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]ClusterRole, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_ClusterRole(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { + if in.Verbs != nil { + in, out := in.Verbs, &out.Verbs + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Verbs = nil + } + if err := runtime.DeepCopy_runtime_RawExtension(in.AttributeRestrictions, &out.AttributeRestrictions, c); err != nil { + return err + } + if in.APIGroups != nil { + in, out := in.APIGroups, &out.APIGroups + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.APIGroups = nil + } + if in.Resources != nil { + in, out := in.Resources, &out.Resources + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Resources = nil + } + if in.ResourceNames != nil { + in, out := in.ResourceNames, &out.ResourceNames + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.ResourceNames = nil + } + if in.NonResourceURLs != nil { + in, out := in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, 
len(in)) + copy(*out, in) + } else { + out.NonResourceURLs = nil + } + return nil +} + +func DeepCopy_v1alpha1_Role(in Role, out *Role, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Rules != nil { + in, out := in.Rules, &out.Rules + *out = make([]PolicyRule, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Rules = nil + } + return nil +} + +func DeepCopy_v1alpha1_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Subjects != nil { + in, out := in.Subjects, &out.Subjects + *out = make([]Subject, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { + return err + } + return nil +} + +func DeepCopy_v1alpha1_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]RoleBinding, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_RoleBinding(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { + if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { + return err + } + if in.Items != nil { + in, out := in.Items, &out.Items + *out = make([]Role, len(in)) + for i := range in { + if err := DeepCopy_v1alpha1_Role(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func DeepCopy_v1alpha1_Subject(in Subject, out *Subject, c *conversion.Cloner) error { + out.Kind = in.Kind + out.APIVersion = in.APIVersion + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go new file mode 100644 index 00000000..6873ebb1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +// +genconversion=true +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go new file mode 100644 index 00000000..54b03ed1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go @@ -0,0 +1,2209 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + Subject +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (m *ClusterRole) String() string { return proto.CompactTextString(m) } +func (*ClusterRole) ProtoMessage() {} + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleBinding) ProtoMessage() {} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (m *ClusterRoleBindingList) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleBindingList) ProtoMessage() {} + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (m *ClusterRoleList) String() string { return proto.CompactTextString(m) } +func (*ClusterRoleList) ProtoMessage() {} + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (m *PolicyRule) String() string { return proto.CompactTextString(m) } +func (*PolicyRule) ProtoMessage() {} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (m *RoleBinding) String() string { return proto.CompactTextString(m) } +func (*RoleBinding) ProtoMessage() {} + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (m *RoleBindingList) String() string { return proto.CompactTextString(m) } +func (*RoleBindingList) ProtoMessage() {} + +func (m *RoleList) Reset() { *m = RoleList{} } +func (m *RoleList) String() string { return proto.CompactTextString(m) } +func (*RoleList) ProtoMessage() {} + +func (m *Subject) Reset() { *m = Subject{} } +func (m *Subject) String() string { return proto.CompactTextString(m) } 
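// NOTE: illustrative sketch only, not part of the vendored upstream sources.
// It shows how the internal rbac types defined earlier in this patch
// (PolicyRule, ClusterRole, ClusterRoleBinding, Subject) might be assembled:
// a cluster-wide read-only role and a binding granting it to a single user.
// The role and user names are made up for the example; AttributeRestrictions
// is left nil, which the PolicyRule comments allow.
package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/rbac"
)

// newReadOnlyClusterRole allows get/list/watch on every resource in every API group.
func newReadOnlyClusterRole() rbac.ClusterRole {
	return rbac.ClusterRole{
		ObjectMeta: api.ObjectMeta{Name: "read-only"},
		Rules: []rbac.PolicyRule{
			{
				Verbs:     []string{"get", "list", "watch"},
				APIGroups: []string{rbac.APIGroupAll},
				Resources: []string{rbac.ResourceAll},
			},
		},
	}
}

// bindToUser grants the given ClusterRole to a single user via a ClusterRoleBinding.
func bindToUser(role rbac.ClusterRole, user string) rbac.ClusterRoleBinding {
	return rbac.ClusterRoleBinding{
		ObjectMeta: api.ObjectMeta{Name: role.Name + "-" + user},
		Subjects:   []rbac.Subject{{Kind: rbac.UserKind, Name: user}},
		// RoleRef can only reference a ClusterRole in the global namespace.
		RoleRef: api.ObjectReference{Kind: "ClusterRole", Name: role.Name},
	}
}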
+func (*Subject) ProtoMessage() {} + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleList") + proto.RegisterType((*Subject)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AttributeRestrictions.Size())) + n6, err := m.AttributeRestrictions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) 
+ if err != nil { + return 0, err + } + i += n8 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var 
l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.AttributeRestrictions.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AttributeRestrictions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto new file mode 100644 index 00000000..1b23f714 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto @@ -0,0 +1,158 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + repeated string resourceNames = 5; + + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + repeated string nonResourceURLs = 6; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. 
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIVersion holds the API group and version of the referenced object. + optional string apiVersion = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go new file mode 100644 index 00000000..eadcb4fb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: rbac.GroupName, Version: "v1alpha1"} + +func AddToScheme(scheme *runtime.Scheme) { + addKnownTypes(scheme) +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &v1.ListOptions{}, + &v1.DeleteOptions{}, + &v1.ExportOptions{}, + ) + versioned.AddToGroupVersion(scheme, SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go new file mode 100644 index 00000000..52eacfe3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go @@ -0,0 +1,163 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + AttributeRestrictions runtime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + APIGroups []string `json:"apiGroups" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
+ NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIVersion holds the API group and version of the referenced object. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true,nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true,nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..b88c93c0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,138 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. 
Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiVersion": "APIVersion holds the API group and version of the referenced object.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go b/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go new file mode 100644 index 00000000..3cb077fa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go @@ -0,0 +1,600 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
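[Editor's note] The generated SwaggerDoc() methods above are plain accessors over these maps; go-restful consumes them when building API documentation, but they can also be read directly. A small hedged sketch, not part of the vendored code:

```go
package main

import (
	"fmt"

	rbac "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
)

func main() {
	// The empty key holds the type-level description; other keys are json field names.
	docs := rbac.PolicyRule{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["verbs"])
}
```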
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + + "k8s.io/kubernetes/pkg/util/sets" + + "github.com/golang/glog" +) + +// NewDeltaFIFO returns a Store which can be used process changes to items. +// +// keyFunc is used to figure out what key an object should have. (It's +// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.) +// +// 'compressor' may compress as many or as few items as it wants +// (including returning an empty slice), but it should do what it +// does quickly since it is called while the queue is locked. +// 'compressor' may be nil if you don't want any delta compression. +// +// 'keyLister' is expected to return a list of keys that the consumer of +// this queue "knows about". It is used to decide which items are missing +// when Replace() is called; 'Deleted' deltas are produced for these items. +// It may be nil if you don't need to detect all deletions. +// TODO: consider merging keyLister with this object, tracking a list of +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// TODO(lavalamp): I believe there is a possible race only when using an +// external known object source that the above TODO would +// fix. +// +// Also see the comment on DeltaFIFO. +func NewDeltaFIFO(keyFunc KeyFunc, compressor DeltaCompressor, knownObjects KeyListerGetter) *DeltaFIFO { + f := &DeltaFIFO{ + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: keyFunc, + deltaCompressor: compressor, + knownObjects: knownObjects, + } + f.cond.L = &f.lock + return f +} + +// DeltaFIFO is like FIFO, but allows you to process deletes. +// +// DeltaFIFO is a producer-consumer queue, where a Reflector is +// intended to be the producer, and the consumer is whatever calls +// the Pop() method. +// +// DeltaFIFO solves this use case: +// * You want to process every object change (delta) at most once. +// * When you process an object, you want to see everything +// that's happened to it since you last processed it. +// * You want to process the deletion of objects. +// * You might want to periodically reprocess objects. +// +// DeltaFIFO's Pop(), Get(), and GetByKey() methods return +// interface{} to satisfy the Store/Queue interfaces, but it +// will always return an object of type Deltas. +// +// A note on threading: If you call Pop() in parallel from multiple +// threads, you could end up with multiple threads processing slightly +// different versions of the same object. +// +// A note on the KeyLister used by the DeltaFIFO: It's main purpose is +// to list keys that are "known", for the purpose of figuring out which +// items have been deleted when Replace() or Delete() are called. The deleted +// object will be included in the DeleteFinalStateUnknown markers. These objects +// could be stale. +// +// You may provide a function to compress deltas (e.g., represent a +// series of Updates as a single Update). +type DeltaFIFO struct { + // lock/cond protects access to 'items' and 'queue'. + lock sync.RWMutex + cond sync.Cond + + // We depend on the property that items in the set are in + // the queue and vice versa, and that all Deltas in this + // map have at least one Delta. + items map[string]Deltas + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update was called first. 
+ populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item + // insertion and retrieval, and should be deterministic. + keyFunc KeyFunc + + // deltaCompressor tells us how to combine two or more + // deltas. It may be nil. + deltaCompressor DeltaCompressor + + // knownObjects list keys that are "known", for the + // purpose of figuring out which items have been deleted + // when Replace() or Delete() is called. + knownObjects KeyListerGetter +} + +var ( + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue +) + +var ( + // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas + // object with zero length is encountered (should be impossible, + // even if such an object is accidentally produced by a DeltaCompressor-- + // but included for completeness). + ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") +) + +// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or +// DeletedFinalStateUnknown objects. +func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { + if d, ok := obj.(Deltas); ok { + if len(d) == 0 { + return "", KeyError{obj, ErrZeroLengthDeltasObject} + } + obj = d.Newest().Object + } + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return f.keyFunc(obj) +} + +// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// or an Update called first but the first batch of items inserted by Replace() has been popped +func (f *DeltaFIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *DeltaFIFO) Add(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Added, obj) +} + +// Update is just like Add, but makes an Updated Delta. +func (f *DeltaFIFO) Update(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Updated, obj) +} + +// Delete is just like Add, but makes an Deleted Delta. If the item does not +// already exist, it will be ignored. (It may have already been deleted by a +// Replace (re-list), for example. +func (f *DeltaFIFO) Delete(obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if f.knownObjects == nil { + if _, exists := f.items[id]; !exists { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } else { + // We only want to skip the "deletion" action if the object doesn't + // exist in knownObjects and it doesn't have corresponding item in items. + // Note that even if there is a "deletion" action in items, we can ignore it, + // because it will be deduped automatically in "queueActionLocked" + _, exists, err := f.knownObjects.GetByKey(id) + _, itemsExist := f.items[id] + if err == nil && !exists && !itemsExist { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + // TODO(lavalamp): This may be racy-- we aren't properly locked + // with knownObjects. 
+ return nil + } + } + + return f.queueActionLocked(Deleted, obj) +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +// +// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is +// different from the Add/Update/Delete functions. +func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { + deltas, ok := obj.(Deltas) + if !ok { + return fmt.Errorf("object must be of type deltas, but got: %#v", obj) + } + id, err := f.KeyOf(deltas.Newest().Object) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if _, exists := f.items[id]; exists { + return nil + } + + f.queue = append(f.queue, id) + f.items[id] = deltas + f.cond.Broadcast() + return nil +} + +// re-listing and watching can deliver the same update multiple times in any +// order. This will combine the most recent two deltas if they are the same. +func dedupDeltas(deltas Deltas) Deltas { + n := len(deltas) + if n < 2 { + return deltas + } + a := &deltas[n-1] + b := &deltas[n-2] + if out := isDup(a, b); out != nil { + d := append(Deltas{}, deltas[:n-2]...) + return append(d, *out) + } + return deltas +} + +// If a & b represent the same event, returns the delta that ought to be kept. +// Otherwise, returns nil. +// TODO: is there anything other than deletions that need deduping? +func isDup(a, b *Delta) *Delta { + if out := isDeletionDup(a, b); out != nil { + return out + } + // TODO: Detect other duplicate situations? Are there any? + return nil +} + +// keep the one with the most information if both are deletions. +func isDeletionDup(a, b *Delta) *Delta { + if b.Type != Deleted || a.Type != Deleted { + return nil + } + // Do more sophisticated checks, or is this sufficient? + if _, ok := b.Object.(DeletedFinalStateUnknown); ok { + return a + } + return b +} + +// willObjectBeDeletedLocked returns true only if the last delta for the +// given object is Delete. Caller must lock first. +func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool { + deltas := f.items[id] + return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted +} + +// queueActionLocked appends to the delta list for the object, calling +// f.deltaCompressor if needed. Caller must lock first. +func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + + // If object is supposed to be deleted (last event is Deleted), + // then we should ignore Sync events, because it would result in + // recreation of this object. + if actionType == Sync && f.willObjectBeDeletedLocked(id) { + return nil + } + + newDeltas := append(f.items[id], Delta{actionType, obj}) + newDeltas = dedupDeltas(newDeltas) + if f.deltaCompressor != nil { + newDeltas = f.deltaCompressor.Compress(newDeltas) + } + + _, exists := f.items[id] + if len(newDeltas) > 0 { + if !exists { + f.queue = append(f.queue, id) + } + f.items[id] = newDeltas + f.cond.Broadcast() + } else if exists { + // The compression step removed all deltas, so + // we need to remove this from our map (extra items + // in the queue are ignored if they are not in the + // map). 
+ delete(f.items, id) + } + return nil +} + +// List returns a list of all the items; it returns the object +// from the most recent Delta. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + // Copy item's slice so operations on this slice (delta + // compression) won't interfere with the object we return. + item = copyDeltas(item) + list = append(list, item.Newest().Object) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *DeltaFIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.items)) + for key := range f.items { + list = append(list, key) + } + return list +} + +// Get returns the complete list of deltas for the requested item, +// or sets exists=false. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.KeyOf(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the complete list of deltas for the requested item, +// setting exists=false if that list is empty. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + d, exists := f.items[key] + if exists { + // Copy item's slice so operations on this slice (delta + // compression) won't interfere with the object we return. + d = copyDeltas(d) + } + return d, exists, nil +} + +// Pop blocks until an item is added to the queue, and then returns it. If +// multiple items are ready, they are returned in the order in which they were +// added/updated. The item is removed from the queue (and the store) before it +// is returned, so if you don't successfully process it, you need to add it back +// with AddIfNotPresent(). +// process function is called under lock, so it is safe update data structures +// in it that need to be in sync with the queue (e.g. knownKeys). +// +// Pop returns a 'Deltas', which has a complete list of all the things +// that happened to the object (deltas) while it was sitting in the queue. +func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { + f.lock.Lock() + defer f.lock.Unlock() + for { + for len(f.queue) == 0 { + f.cond.Wait() + } + id := f.queue[0] + f.queue = f.queue[1:] + item, ok := f.items[id] + if f.initialPopulationCount > 0 { + f.initialPopulationCount-- + } + if !ok { + // Item may have been deleted subsequently. + continue + } + delete(f.items, id) + // Don't need to copyDeltas here, because we're transferring + // ownership to the caller. + return item, process(item) + } +} + +// Replace will delete the contents of 'f', using instead the given map. +// 'f' takes ownership of the map, you should not reference the map again +// after calling this function. f's queue is reset, too; upon return, it +// will contain the items in the map, in no particular order. 
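[Editor's note] Given the Pop contract documented above (an item comes back as a Deltas slice and is removed from the queue before processing), a minimal producer/consumer sketch might look like the following. The widget type and key function are hypothetical; only NewDeltaFIFO, Pop, Deltas and DeletedFinalStateUnknown come from this file.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// widget is a stand-in object; any type works as long as the key function understands it.
type widget struct {
	Name string
	Spec string
}

func main() {
	keyOf := func(obj interface{}) (string, error) { return obj.(*widget).Name, nil }

	// No delta compressor and no external "known objects" store.
	f := cache.NewDeltaFIFO(keyOf, nil, nil)

	// Producer side: multiple changes to the same key accumulate as deltas.
	f.Add(&widget{Name: "a", Spec: "v1"})
	f.Update(&widget{Name: "a", Spec: "v2"})

	// Consumer side: Pop hands back the full Deltas slice for one key.
	_, err := f.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			if unknown, ok := d.Object.(cache.DeletedFinalStateUnknown); ok {
				fmt.Println("missed the delete for", unknown.Key)
				continue
			}
			fmt.Printf("%s -> %#v\n", d.Type, d.Object)
		}
		return nil
	})
	if err != nil {
		fmt.Println("processing failed:", err)
	}
}
```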
+func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { + f.lock.Lock() + defer f.lock.Unlock() + keys := make(sets.String, len(list)) + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(list) + } + + for _, item := range list { + key, err := f.KeyOf(item) + if err != nil { + return KeyError{item, err} + } + keys.Insert(key) + if err := f.queueActionLocked(Sync, item); err != nil { + return fmt.Errorf("couldn't enqueue object: %v", err) + } + } + + if f.knownObjects == nil { + // Do deletion detection against our own list. + for k, oldItem := range f.items { + if keys.Has(k) { + continue + } + var deletedObj interface{} + if n := oldItem.Newest(); n != nil { + deletedObj = n.Object + } + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + return nil + } + + // Detect deletions not already in the queue. + // TODO(lavalamp): This may be racy-- we aren't properly locked + // with knownObjects. Unproven. + knownKeys := f.knownObjects.ListKeys() + for _, k := range knownKeys { + if keys.Has(k) { + continue + } + + deletedObj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + deletedObj = nil + glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + } else if !exists { + deletedObj = nil + glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + } + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + return nil +} + +// Resync will send a sync event for each item +func (f *DeltaFIFO) Resync() error { + f.lock.RLock() + defer f.lock.RUnlock() + for _, k := range f.knownObjects.ListKeys() { + obj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, k) + continue + } else if !exists { + glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", k) + continue + } + + if err := f.queueActionLocked(Sync, obj); err != nil { + return fmt.Errorf("couldn't queue object: %v", err) + } + } + return nil +} + +// A KeyListerGetter is anything that knows how to list its keys and look up by key. +type KeyListerGetter interface { + KeyLister + KeyGetter +} + +// A KeyLister is anything that knows how to list its keys. +type KeyLister interface { + ListKeys() []string +} + +// A KeyGetter is anything that knows how to get the value stored under a given key. +type KeyGetter interface { + GetByKey(key string) (interface{}, bool, error) +} + +// DeltaCompressor is an algorithm that removes redundant changes. +type DeltaCompressor interface { + Compress(Deltas) Deltas +} + +// DeltaCompressorFunc should remove redundant changes; but changes that +// are redundant depend on one's desired semantics, so this is an +// injectable function. +// +// DeltaCompressorFunc adapts a raw function to be a DeltaCompressor. +type DeltaCompressorFunc func(Deltas) Deltas + +// Compress just calls dc. +func (dc DeltaCompressorFunc) Compress(d Deltas) Deltas { + return dc(d) +} + +// DeltaType is the type of a change (addition, deletion, etc) +type DeltaType string + +const ( + Added DeltaType = "Added" + Updated DeltaType = "Updated" + Deleted DeltaType = "Deleted" + // The other types are obvious. 
You'll get Sync deltas when: + // * A watch expires/errors out and a new list/watch cycle is started. + // * You've turned on periodic syncs. + // (Anything that trigger's DeltaFIFO's Replace() method.) + Sync DeltaType = "Sync" +) + +// Delta is the type stored by a DeltaFIFO. It tells you what change +// happened, and the object's state after* that change. +// +// [*] Unless the change is a deletion, and then you'll get the final +// state of the object before it was deleted. +type Delta struct { + Type DeltaType + Object interface{} +} + +// Deltas is a list of one or more 'Delta's to an individual object. +// The oldest delta is at index 0, the newest delta is the last one. +type Deltas []Delta + +// Oldest is a convenience function that returns the oldest delta, or +// nil if there are no deltas. +func (d Deltas) Oldest() *Delta { + if len(d) > 0 { + return &d[0] + } + return nil +} + +// Newest is a convenience function that returns the newest delta, or +// nil if there are no deltas. +func (d Deltas) Newest() *Delta { + if n := len(d); n > 0 { + return &d[n-1] + } + return nil +} + +// copyDeltas returns a shallow copy of d; that is, it copies the slice but not +// the objects in the slice. This allows Get/List to return an object that we +// know won't be clobbered by a subsequent call to a delta compressor. +func copyDeltas(d Deltas) Deltas { + d2 := make(Deltas, len(d)) + copy(d2, d) + return d2 +} + +// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where +// an object was deleted but the watch deletion event was missed. In this +// case we don't know the final "resting" state of the object, so there's +// a chance the included `Obj` is stale. +type DeletedFinalStateUnknown struct { + Key string + Obj interface{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go b/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go new file mode 100644 index 00000000..16600cf2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. +// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go b/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go new file mode 100644 index 00000000..ad8684e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go @@ -0,0 +1,208 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
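[Editor's note] The DeltaCompressorFunc adapter defined above makes it easy to plug an arbitrary compression policy into NewDeltaFIFO. Here is a hedged sketch that collapses consecutive Updated deltas so only the newest survives; the string keys are purely illustrative and nothing here is part of the vendored file.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// collapseUpdates keeps only the most recent of any run of consecutive Updated deltas,
// leaving Added, Deleted and Sync deltas untouched.
var collapseUpdates = cache.DeltaCompressorFunc(func(d cache.Deltas) cache.Deltas {
	var out cache.Deltas
	for _, cur := range d {
		if n := len(out); n > 0 && out[n-1].Type == cache.Updated && cur.Type == cache.Updated {
			out[n-1] = cur // replace the older Updated with the newer one
			continue
		}
		out = append(out, cur)
	}
	return out
})

func main() {
	keyFunc := func(obj interface{}) (string, error) { return obj.(string), nil }
	f := cache.NewDeltaFIFO(keyFunc, collapseUpdates, nil)

	f.Add("a")
	f.Update("a")
	f.Update("a")

	// After compression the queued deltas for "a" are [Added, Updated], not three entries.
	item, _, _ := f.Get("a")
	fmt.Println(item)
}
```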
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util" +) + +// ExpirationCache implements the store interface +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// Note that the ExpirationCache is inherently slower than a normal +// threadSafeStore because it takes a write lock every time it checks if +// an item has expired. +type ExpirationCache struct { + cacheStorage ThreadSafeStore + keyFunc KeyFunc + clock util.Clock + expirationPolicy ExpirationPolicy + // expirationLock is a write lock used to guarantee that we don't clobber + // newly inserted objects because of a stale expiration timestamp comparison + expirationLock sync.Mutex +} + +// ExpirationPolicy dictates when an object expires. Currently only abstracted out +// so unittests don't rely on the system clock. +type ExpirationPolicy interface { + IsExpired(obj *timestampedEntry) bool +} + +// TTLPolicy implements a ttl based ExpirationPolicy. +type TTLPolicy struct { + // >0: Expire entries with an age > ttl + // <=0: Don't expire any entry + Ttl time.Duration + + // Clock used to calculate ttl expiration + Clock util.Clock +} + +// IsExpired returns true if the given object is older than the ttl, or it can't +// determine its age. +func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool { + return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl +} + +// timestampedEntry is the only type allowed in a ExpirationCache. +type timestampedEntry struct { + obj interface{} + timestamp time.Time +} + +// getTimestampedEntry returnes the timestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { + item, _ := c.cacheStorage.Get(key) + if tsEntry, ok := item.(*timestampedEntry); ok { + return tsEntry, true + } + return nil, false +} + +// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't +// already expired. It holds a write lock across deletion. +func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { + // Prevent all inserts from the time we deem an item as "expired" to when we + // delete it, so an un-expired item doesn't sneak in under the same key, just + // before the Delete. 
+ c.expirationLock.Lock() + defer c.expirationLock.Unlock() + timestampedItem, exists := c.getTimestampedEntry(key) + if !exists { + return nil, false + } + if c.expirationPolicy.IsExpired(timestampedItem) { + glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + c.cacheStorage.Delete(key) + return nil, false + } + return timestampedItem.obj, true +} + +// GetByKey returns the item stored under the key, or sets exists=false. +func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// Get returns unexpired items. It purges the cache of expired items in the +// process. +func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// List retrieves a list of unexpired items. It purges the cache of expired +// items in the process. +func (c *ExpirationCache) List() []interface{} { + items := c.cacheStorage.List() + + list := make([]interface{}, 0, len(items)) + for _, item := range items { + obj := item.(*timestampedEntry).obj + if key, err := c.keyFunc(obj); err != nil { + list = append(list, obj) + } else if obj, exists := c.getOrExpire(key); exists { + list = append(list, obj) + } + } + return list +} + +// ListKeys returns a list of all keys in the expiration cache. +func (c *ExpirationCache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// Add timestamps an item and inserts it into the cache, overwriting entries +// that might exist under the same key. +func (c *ExpirationCache) Add(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, ×tampedEntry{obj, c.clock.Now()}) + return nil +} + +// Update has not been implemented yet for lack of a use case, so this method +// simply calls `Add`. This effectively refreshes the timestamp. +func (c *ExpirationCache) Update(obj interface{}) error { + return c.Add(obj) +} + +// Delete removes an item from the cache. +func (c *ExpirationCache) Delete(obj interface{}) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// Replace will convert all items in the given list to TimestampedEntries +// before attempting the replace operation. The replace operation will +// delete the contents of the ExpirationCache `c`. 
+func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + items := map[string]interface{}{} + ts := c.clock.Now() + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = ×tampedEntry{item, ts} + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync will touch all objects to put them into the processing queue +func (c *ExpirationCache) Resync() error { + return c.cacheStorage.Resync() +} + +// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy +func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return &ExpirationCache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + clock: util.RealClock{}, + expirationPolicy: &TTLPolicy{ttl, util.RealClock{}}, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go b/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go new file mode 100644 index 00000000..3b959770 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" +) + +type fakeThreadSafeMap struct { + ThreadSafeStore + deletedKeys chan<- string +} + +func (c *fakeThreadSafeMap) Delete(key string) { + if c.deletedKeys != nil { + c.ThreadSafeStore.Delete(key) + c.deletedKeys <- key + } +} + +type FakeExpirationPolicy struct { + NeverExpire sets.String + RetrieveKeyFunc KeyFunc +} + +func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { + key, _ := p.RetrieveKeyFunc(obj) + return !p.NeverExpire.Has(key) +} + +func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock util.Clock) Store { + cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) + return &ExpirationCache{ + cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys}, + keyFunc: keyFunc, + clock: cacheClock, + expirationPolicy: expirationPolicy, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go b/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go new file mode 100644 index 00000000..ccd69ef7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
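[Editor's note] NewTTLStore above wires an ExpirationCache to a TTLPolicy and the real clock, so entries silently disappear from reads once they outlive the TTL. A hedged usage sketch with an illustrative key function and TTL:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	keyFunc := func(obj interface{}) (string, error) { return obj.(string), nil }

	// Entries are timestamped on Add and purged lazily on read once older than the TTL.
	store := cache.NewTTLStore(keyFunc, 50*time.Millisecond)

	store.Add("session-1")
	if obj, exists, _ := store.GetByKey("session-1"); exists {
		fmt.Println("fresh:", obj)
	}

	time.Sleep(100 * time.Millisecond)
	if _, exists, _ := store.GetByKey("session-1"); !exists {
		fmt.Println("expired and purged on read")
	}
}
```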
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// FakeStore lets you define custom functions for store operations +type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVerion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.Update(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go b/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go new file mode 100644 index 00000000..eaa35e62 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go @@ -0,0 +1,294 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + + "k8s.io/kubernetes/pkg/util/sets" +) + +// PopProcessFunc is passed to Pop() method of Queue interface. +// It is supposed to process the element popped from the queue. 
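[Editor's note] FakeCustomStore above is a test double: any Store method whose corresponding *Func field is left nil falls back to a harmless no-op. A hedged sketch of stubbing just GetByKey in a test; the data is illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	calls := 0
	fake := &cache.FakeCustomStore{
		GetByKeyFunc: func(key string) (interface{}, bool, error) {
			calls++
			return "stubbed-" + key, true, nil
		},
	}

	// Add has no AddFunc defined, so it is a no-op that returns nil.
	_ = fake.Add("ignored")

	obj, exists, _ := fake.GetByKey("pod-a")
	fmt.Println(obj, exists, calls) // stubbed-pod-a true 1
}
```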
+type PopProcessFunc func(interface{}) error + +// Queue is exactly like a Store, but has a Pop() method too. +type Queue interface { + Store + + // Pop blocks until it has something to process. + // It returns the object that was process and the result of processing. + Pop(PopProcessFunc) (interface{}, error) + + // AddIfNotPresent adds a value previously + // returned by Pop back into the queue as long + // as nothing else (presumably more recent) + // has since been added. + AddIfNotPresent(interface{}) error + + // Return true if the first batch of items has been popped + HasSynced() bool +} + +// Helper function for popping from Queue. +// WARNING: Do NOT use this function in non-test code to avoid races +// unless you really really really really know what you are doing. +func Pop(queue Queue) interface{} { + var result interface{} + queue.Pop(func(obj interface{}) error { + result = obj + return nil + }) + return result +} + +// FIFO receives adds and updates from a Reflector, and puts them in a queue for +// FIFO order processing. If multiple adds/updates of a single item happen while +// an item is in the queue before it has been processed, it will only be +// processed once, and when it is processed, the most recent version will be +// processed. This can't be done with a channel. +// +// FIFO solves this use case: +// * You want to process every object (exactly) once. +// * You want to process the most recent version of the object when you process it. +// * You do not want to process deleted objects, they should be removed from the queue. +// * You do not want to periodically reprocess objects. +// Compare with DeltaFIFO for other use cases. +type FIFO struct { + lock sync.RWMutex + cond sync.Cond + // We depend on the property that items in the set are in the queue and vice versa. + items map[string]interface{} + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc +} + +var ( + _ = Queue(&FIFO{}) // FIFO is a Queue +) + +// Return true if an Add/Update/Delete/AddIfNotPresent are called first, +// or an Update called first but the first batch of items inserted by Replace() has been popped +func (f *FIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *FIFO) Add(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if _, exists := f.items[id]; !exists { + f.queue = append(f.queue, id) + } + f.items[id] = obj + f.cond.Broadcast() + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. 
+func (f *FIFO) AddIfNotPresent(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if _, exists := f.items[id]; exists { + return nil + } + + f.queue = append(f.queue, id) + f.items[id] = obj + f.cond.Broadcast() + return nil +} + +// Update is the same as Add in this implementation. +func (f *FIFO) Update(obj interface{}) error { + return f.Add(obj) +} + +// Delete removes an item. It doesn't add it to the queue, because +// this implementation assumes the consumer only cares about the objects, +// not the order in which they were created/added. +func (f *FIFO) Delete(obj interface{}) error { + id, err := f.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + delete(f.items, id) + return err +} + +// List returns a list of all the items. +func (f *FIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *FIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.items)) + for key := range f.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + item, exists = f.items[key] + return item, exists, nil +} + +// Pop waits until an item is ready and processes it. If multiple items are +// ready, they are returned in the order in which they were added/updated. +// The item is removed from the queue (and the store) before it is processed, +// so if you don't successfully process it, it should be added back with +// AddIfNotPresent(). process function is called under lock, so it is safe +// update data structures in it that need to be in sync with the queue. +func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { + f.lock.Lock() + defer f.lock.Unlock() + for { + for len(f.queue) == 0 { + f.cond.Wait() + } + id := f.queue[0] + f.queue = f.queue[1:] + if f.initialPopulationCount > 0 { + f.initialPopulationCount-- + } + item, ok := f.items[id] + if !ok { + // Item may have been deleted subsequently. + continue + } + delete(f.items, id) + return item, process(item) + } +} + +// Replace will delete the contents of 'f', using instead the given map. +// 'f' takes ownership of the map, you should not reference the map again +// after calling this function. f's queue is reset, too; upon return, it +// will contain the items in the map, in no particular order. 
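[Editor's note] FIFO's coalescing behavior described above (each key is processed at most once, always at its newest version) is visible when the same key is added twice before the consumer runs. A hedged sketch; the task type is hypothetical and NewFIFO is defined a little further down in this file.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// task is a stand-in object keyed by its Name.
type task struct {
	Name string
	Rev  int
}

func main() {
	f := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(*task).Name, nil
	})

	// Two updates to the same key before the consumer runs: only the
	// newest version is processed, and only once.
	f.Add(&task{Name: "build", Rev: 1})
	f.Add(&task{Name: "build", Rev: 2})

	_, err := f.Pop(func(obj interface{}) error {
		t := obj.(*task)
		fmt.Printf("processing %s rev %d\n", t.Name, t.Rev) // prints rev 2
		return nil
	})
	if err != nil {
		fmt.Println("processing failed:", err)
	}
}
```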
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { + items := map[string]interface{}{} + for _, item := range list { + key, err := f.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + + f.lock.Lock() + defer f.lock.Unlock() + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(items) + } + + f.items = items + f.queue = f.queue[:0] + for id := range items { + f.queue = append(f.queue, id) + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// Resync will touch all objects to put them into the processing queue +func (f *FIFO) Resync() error { + f.lock.Lock() + defer f.lock.Unlock() + + inQueue := sets.NewString() + for _, id := range f.queue { + inQueue.Insert(id) + } + for id := range f.items { + if !inQueue.Has(id) { + f.queue = append(f.queue, id) + } + } + if len(f.queue) > 0 { + f.cond.Broadcast() + } + return nil +} + +// NewFIFO returns a Store which can be used to queue up items to +// process. +func NewFIFO(keyFunc KeyFunc) *FIFO { + f := &FIFO{ + items: map[string]interface{}{}, + queue: []string{}, + keyFunc: keyFunc, + } + f.cond.L = &f.lock + return f +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/index.go b/vendor/k8s.io/kubernetes/pkg/client/cache/index.go new file mode 100644 index 00000000..572f2c06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/index.go @@ -0,0 +1,82 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/util/sets" +) + +// Indexer is a storage interface that lets you list objects using multiple indexing functions +type Indexer interface { + Store + // Retrieve list of objects that match on the named indexing function + Index(indexName string, obj interface{}) ([]interface{}, error) + // ListIndexFuncValues returns the list of generated values of an Index func + ListIndexFuncValues(indexName string) []string + // ByIndex lists object that match on the named indexing function with the exact key + ByIndex(indexName, indexKey string) ([]interface{}, error) + // GetIndexer return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error +} + +// IndexFunc knows how to provide an indexed value for an object. +type IndexFunc func(obj interface{}) ([]string, error) + +// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns +// unique values for every object. This is conversion can create errors when more than one key is found. You +// should prefer to make proper key and index functions. 
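[Editor's note] The Indexer interface above layers named index functions on top of a Store. A hedged sketch using the namespace index declared just below; NewIndexer and MetaNamespaceKeyFunc come from parts of this package that are not shown in this hunk, and the pod data is illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Index cached objects by namespace using the package's namespace index function.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "web-1", Namespace: "prod"}})
	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "web-2", Namespace: "dev"}})

	// Look up everything the index maps to the "prod" namespace.
	objs, err := indexer.ByIndex(cache.NamespaceIndex, "prod")
	fmt.Println(len(objs), err) // 1 <nil>
}
```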
+func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { + return func(obj interface{}) (string, error) { + indexKeys, err := indexFunc(obj) + if err != nil { + return "", err + } + if len(indexKeys) > 1 { + return "", fmt.Errorf("too many keys: %v", indexKeys) + } + return indexKeys[0], nil + } +} + +const ( + NamespaceIndex string = "namespace" +) + +// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace +func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return []string{""}, fmt.Errorf("object has no meta: %v", err) + } + return []string{meta.GetNamespace()}, nil +} + +// Index maps the indexed value to a set of keys in the store that match on that value +type Index map[string]sets.String + +// Indexers maps a name to a IndexFunc +type Indexers map[string]IndexFunc + +// Indices maps a name to an Index +type Indices map[string]Index diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go new file mode 100644 index 00000000..90c9c62f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go @@ -0,0 +1,672 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/labels" +) + +// TODO: generate these classes and methods for all resources of interest using +// a script. Can use "go generate" once 1.4 is supported by all users. + +// StoreToPodLister makes a Store have the List method of the client.PodInterface +// The Store must contain (only) Pods. +// +// Example: +// s := cache.NewStore() +// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"} +// r := cache.NewReflector(lw, &api.Pod{}, s).Run() +// l := StoreToPodLister{s} +// l.List() +type StoreToPodLister struct { + Indexer +} + +// Please note that selector is filtering among the pods that have gotten into +// the store; there may have been some filtering that already happened before +// that. +// +// TODO: converge on the interface in pkg/client. +func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err error) { + // TODO: it'd be great to just call + // s.Pods(api.NamespaceAll).List(selector), however then we'd have to + // remake the list.Items as a []*api.Pod. So leave this separate for + // now. 
+ for _, m := range s.Indexer.List() { + pod := m.(*api.Pod) + if selector.Matches(labels.Set(pod.Labels)) { + pods = append(pods, pod) + } + } + return pods, nil +} + +// Pods is taking baby steps to be more like the api in pkg/client +func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer { + return storePodsNamespacer{s.Indexer, namespace} +} + +type storePodsNamespacer struct { + indexer Indexer + namespace string +} + +// Please note that selector is filtering among the pods that have gotten into +// the store; there may have been some filtering that already happened before +// that. +func (s storePodsNamespacer) List(selector labels.Selector) (api.PodList, error) { + pods := api.PodList{} + + if s.namespace == api.NamespaceAll { + for _, m := range s.indexer.List() { + pod := m.(*api.Pod) + if selector.Matches(labels.Set(pod.Labels)) { + pods.Items = append(pods.Items, *pod) + } + } + return pods, nil + } + + key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} + items, err := s.indexer.Index(NamespaceIndex, key) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range s.indexer.List() { + pod := m.(*api.Pod) + if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { + pods.Items = append(pods.Items, *pod) + } + } + return pods, nil + } + for _, m := range items { + pod := m.(*api.Pod) + if selector.Matches(labels.Set(pod.Labels)) { + pods.Items = append(pods.Items, *pod) + } + } + return pods, nil +} + +// Exists returns true if a pod matching the namespace/name of the given pod exists in the store. +func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) { + _, exists, err := s.Indexer.Get(pod) + if err != nil { + return false, err + } + return exists, nil +} + +// NodeConditionPredicate is a function that indicates whether the given node's conditions meet +// some set of criteria defined by the function. +type NodeConditionPredicate func(node api.Node) bool + +// StoreToNodeLister makes a Store have the List method of the client.NodeInterface +// The Store must contain (only) Nodes. +type StoreToNodeLister struct { + Store +} + +func (s *StoreToNodeLister) List() (machines api.NodeList, err error) { + for _, m := range s.Store.List() { + machines.Items = append(machines.Items, *(m.(*api.Node))) + } + return machines, nil +} + +// NodeCondition returns a storeToNodeConditionLister +func (s *StoreToNodeLister) NodeCondition(predicate NodeConditionPredicate) storeToNodeConditionLister { + // TODO: Move this filtering server side. Currently our selectors don't facilitate searching through a list so we + // have the reflector filter out the Unschedulable field and sift through node conditions in the lister. + return storeToNodeConditionLister{s.Store, predicate} +} + +// storeToNodeConditionLister filters and returns nodes matching the given type and status from the store. +type storeToNodeConditionLister struct { + store Store + predicate NodeConditionPredicate +} + +// List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister. 
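[Editor's note] StoreToPodLister and its Pods() namespacer above are thin, label-selector-aware views over an Indexer. A hedged sketch of listing cached pods both across namespaces and scoped to one namespace; NewIndexer, MetaNamespaceKeyFunc and the pod data are assumptions drawn from elsewhere in the package.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	lister := cache.StoreToPodLister{Indexer: indexer}

	indexer.Add(&api.Pod{ObjectMeta: api.ObjectMeta{
		Name: "web-1", Namespace: "prod", Labels: map[string]string{"app": "web"},
	}})

	// List across all namespaces, filtered by a label selector.
	pods, err := lister.List(labels.SelectorFromSet(labels.Set{"app": "web"}))
	fmt.Println(len(pods), err) // 1 <nil>

	// Or scope to one namespace via the namespacer, which uses the namespace index.
	nsPods, _ := lister.Pods("prod").List(labels.Everything())
	fmt.Println(len(nsPods.Items)) // 1
}
```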
+func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) { + for _, m := range s.store.List() { + node := *m.(*api.Node) + if s.predicate(node) { + nodes.Items = append(nodes.Items, node) + } else { + glog.V(5).Infof("Node %s matches none of the conditions", node.Name) + } + } + return +} + +// StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers. +type StoreToReplicationControllerLister struct { + Indexer +} + +// Exists checks if the given rc exists in the store. +func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationController) (bool, error) { + _, exists, err := s.Indexer.Get(controller) + if err != nil { + return false, err + } + return exists, nil +} + +// StoreToReplicationControllerLister lists all controllers in the store. +// TODO: converge on the interface in pkg/client +func (s *StoreToReplicationControllerLister) List() (controllers []api.ReplicationController, err error) { + for _, c := range s.Indexer.List() { + controllers = append(controllers, *(c.(*api.ReplicationController))) + } + return controllers, nil +} + +func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer { + return storeReplicationControllersNamespacer{s.Indexer, namespace} +} + +type storeReplicationControllersNamespacer struct { + indexer Indexer + namespace string +} + +func (s storeReplicationControllersNamespacer) List(selector labels.Selector) ([]api.ReplicationController, error) { + controllers := []api.ReplicationController{} + + if s.namespace == api.NamespaceAll { + for _, m := range s.indexer.List() { + rc := *(m.(*api.ReplicationController)) + if selector.Matches(labels.Set(rc.Labels)) { + controllers = append(controllers, rc) + } + } + return controllers, nil + } + + key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} + items, err := s.indexer.Index(NamespaceIndex, key) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range s.indexer.List() { + rc := *(m.(*api.ReplicationController)) + if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) { + controllers = append(controllers, rc) + } + } + return controllers, nil + } + for _, m := range items { + rc := *(m.(*api.ReplicationController)) + if selector.Matches(labels.Set(rc.Labels)) { + controllers = append(controllers, rc) + } + } + return controllers, nil +} + +// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. +func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) { + var selector labels.Selector + var rc api.ReplicationController + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) + return + } + + key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}} + items, err := s.Indexer.Index(NamespaceIndex, key) + if err != nil { + return + } + + for _, m := range items { + rc = *m.(*api.ReplicationController) + labelSet := labels.Set(rc.Spec.Selector) + selector = labels.Set(rc.Spec.Selector).AsSelector() + + // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + controllers = append(controllers, rc) + } + if len(controllers) == 0 { + err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// StoreToDeploymentLister gives a store List and Exists methods. The store must contain only Deployments. +type StoreToDeploymentLister struct { + Store +} + +// Exists checks if the given deployment exists in the store. +func (s *StoreToDeploymentLister) Exists(deployment *extensions.Deployment) (bool, error) { + _, exists, err := s.Store.Get(deployment) + if err != nil { + return false, err + } + return exists, nil +} + +// StoreToDeploymentLister lists all deployments in the store. +// TODO: converge on the interface in pkg/client +func (s *StoreToDeploymentLister) List() (deployments []extensions.Deployment, err error) { + for _, c := range s.Store.List() { + deployments = append(deployments, *(c.(*extensions.Deployment))) + } + return deployments, nil +} + +// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found. +func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []extensions.Deployment, err error) { + var d extensions.Deployment + + if len(rs.Labels) == 0 { + err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + return + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + for _, m := range s.Store.List() { + d = *m.(*extensions.Deployment) + if d.Namespace != rs.Namespace { + continue + } + + selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + if len(deployments) == 0 { + err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + return +} + +// StoreToReplicaSetLister gives a store List and Exists methods. The store must contain only ReplicaSets. +type StoreToReplicaSetLister struct { + Store +} + +// Exists checks if the given ReplicaSet exists in the store. +func (s *StoreToReplicaSetLister) Exists(rs *extensions.ReplicaSet) (bool, error) { + _, exists, err := s.Store.Get(rs) + if err != nil { + return false, err + } + return exists, nil +} + +// List lists all ReplicaSets in the store. 
+// TODO: converge on the interface in pkg/client +func (s *StoreToReplicaSetLister) List() (rss []extensions.ReplicaSet, err error) { + for _, rs := range s.Store.List() { + rss = append(rss, *(rs.(*extensions.ReplicaSet))) + } + return rss, nil +} + +type storeReplicaSetsNamespacer struct { + store Store + namespace string +} + +func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (rss []extensions.ReplicaSet, err error) { + for _, c := range s.store.List() { + rs := *(c.(*extensions.ReplicaSet)) + if s.namespace == api.NamespaceAll || s.namespace == rs.Namespace { + if selector.Matches(labels.Set(rs.Labels)) { + rss = append(rss, rs) + } + } + } + return +} + +func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer { + return storeReplicaSetsNamespacer{s.Store, namespace} +} + +// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found. +func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []extensions.ReplicaSet, err error) { + var selector labels.Selector + var rs extensions.ReplicaSet + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + return + } + + for _, m := range s.Store.List() { + rs = *m.(*extensions.ReplicaSet) + if rs.Namespace != pod.Namespace { + continue + } + selector, err = unversioned.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + err = fmt.Errorf("invalid selector: %v", err) + return + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + if len(rss) == 0 { + err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets. +type StoreToDaemonSetLister struct { + Store +} + +// Exists checks if the given daemon set exists in the store. +func (s *StoreToDaemonSetLister) Exists(ds *extensions.DaemonSet) (bool, error) { + _, exists, err := s.Store.Get(ds) + if err != nil { + return false, err + } + return exists, nil +} + +// List lists all daemon sets in the store. +// TODO: converge on the interface in pkg/client +func (s *StoreToDaemonSetLister) List() (dss extensions.DaemonSetList, err error) { + for _, c := range s.Store.List() { + dss.Items = append(dss.Items, *(c.(*extensions.DaemonSet))) + } + return dss, nil +} + +// GetPodDaemonSets returns a list of daemon sets managing a pod. +// Returns an error if and only if no matching daemon sets are found. +func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) { + var selector labels.Selector + var daemonSet extensions.DaemonSet + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + return + } + + for _, m := range s.Store.List() { + daemonSet = *m.(*extensions.DaemonSet) + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + if len(daemonSets) == 0 { + err = fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// StoreToServiceLister makes a Store that has the List method of the client.ServiceInterface +// The Store must contain (only) Services. +type StoreToServiceLister struct { + Store +} + +func (s *StoreToServiceLister) List() (services api.ServiceList, err error) { + for _, m := range s.Store.List() { + services.Items = append(services.Items, *(m.(*api.Service))) + } + return services, nil +} + +// TODO: Move this back to scheduler as a helper function that takes a Store, +// rather than a method of StoreToServiceLister. +func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) { + var selector labels.Selector + var service api.Service + + for _, m := range s.Store.List() { + service = *m.(*api.Service) + // consider only services that are in the same namespace as the pod + if service.Namespace != pod.Namespace { + continue + } + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. + continue + } + selector = labels.Set(service.Spec.Selector).AsSelector() + if selector.Matches(labels.Set(pod.Labels)) { + services = append(services, service) + } + } + if len(services) == 0 { + err = fmt.Errorf("could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return +} + +// StoreToEndpointsLister makes a Store that lists endpoints. +type StoreToEndpointsLister struct { + Store +} + +// List lists all endpoints in the store. +func (s *StoreToEndpointsLister) List() (services api.EndpointsList, err error) { + for _, m := range s.Store.List() { + services.Items = append(services.Items, *(m.(*api.Endpoints))) + } + return services, nil +} + +// GetServiceEndpoints returns the endpoints of a service, matched on service name. +func (s *StoreToEndpointsLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) { + for _, m := range s.Store.List() { + ep = *m.(*api.Endpoints) + if svc.Name == ep.Name && svc.Namespace == ep.Namespace { + return ep, nil + } + } + err = fmt.Errorf("could not find endpoints for service: %v", svc.Name) + return +} + +// StoreToJobLister gives a store List and Exists methods. The store must contain only Jobs. +type StoreToJobLister struct { + Store +} + +// Exists checks if the given job exists in the store. +func (s *StoreToJobLister) Exists(job *batch.Job) (bool, error) { + _, exists, err := s.Store.Get(job) + if err != nil { + return false, err + } + return exists, nil +} + +// StoreToJobLister lists all jobs in the store. +func (s *StoreToJobLister) List() (jobs batch.JobList, err error) { + for _, c := range s.Store.List() { + jobs.Items = append(jobs.Items, *(c.(*batch.Job))) + } + return jobs, nil +} + +// GetPodJobs returns a list of jobs managing a pod. Returns an error only if no matching jobs are found. 
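The service and endpoints listers follow the same pattern of scanning the store and matching selectors; a hedged sketch of finding the services that select a given pod (the store and pod are assumed to come from an existing cache):

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// servicesForPod prints every cached Service whose selector matches the pod.
func servicesForPod(store cache.Store, pod *api.Pod) {
	serviceLister := cache.StoreToServiceLister{Store: store}

	// An error here simply means no service selects the pod.
	services, err := serviceLister.GetPodServices(pod)
	if err != nil {
		fmt.Printf("no services for pod %s: %v\n", pod.Name, err)
		return
	}
	for _, svc := range services {
		fmt.Printf("pod %s is selected by service %s\n", pod.Name, svc.Name)
	}
}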
+func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []batch.Job, err error) { + var selector labels.Selector + var job batch.Job + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name) + return + } + + for _, m := range s.Store.List() { + job = *m.(*batch.Job) + if job.Namespace != pod.Namespace { + continue + } + + selector, _ = unversioned.LabelSelectorAsSelector(job.Spec.Selector) + if !selector.Matches(labels.Set(pod.Labels)) { + continue + } + jobs = append(jobs, job) + } + if len(jobs) == 0 { + err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// Typed wrapper around a store of PersistentVolumes +type StoreToPVFetcher struct { + Store +} + +// GetPersistentVolumeInfo returns cached data for the PersistentVolume 'id'. +func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVolume, error) { + o, exists, err := s.Get(&api.PersistentVolume{ObjectMeta: api.ObjectMeta{Name: id}}) + + if err != nil { + return nil, fmt.Errorf("error retrieving PersistentVolume '%v' from cache: %v", id, err) + } + + if !exists { + return nil, fmt.Errorf("PersistentVolume '%v' not found", id) + } + + return o.(*api.PersistentVolume), nil +} + +// Typed wrapper around a store of PersistentVolumeClaims +type StoreToPVCFetcher struct { + Store +} + +// GetPersistentVolumeClaimInfo returns cached data for the PersistentVolumeClaim 'id'. +func (s *StoreToPVCFetcher) GetPersistentVolumeClaimInfo(namespace string, id string) (*api.PersistentVolumeClaim, error) { + o, exists, err := s.Get(&api.PersistentVolumeClaim{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: id}}) + if err != nil { + return nil, fmt.Errorf("error retrieving PersistentVolumeClaim '%s/%s' from cache: %v", namespace, id, err) + } + + if !exists { + return nil, fmt.Errorf("PersistentVolumeClaim '%s/%s' not found", namespace, id) + } + + return o.(*api.PersistentVolumeClaim), nil +} + +// StoreToPetSetLister gives a store List and Exists methods. The store must contain only PetSets. +type StoreToPetSetLister struct { + Store +} + +// Exists checks if the given PetSet exists in the store. +func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) { + _, exists, err := s.Store.Get(ps) + if err != nil { + return false, err + } + return exists, nil +} + +// List lists all PetSets in the store. +func (s *StoreToPetSetLister) List() (psList []apps.PetSet, err error) { + for _, ps := range s.Store.List() { + psList = append(psList, *(ps.(*apps.PetSet))) + } + return psList, nil +} + +type storePetSetsNamespacer struct { + store Store + namespace string +} + +func (s *StoreToPetSetLister) PetSets(namespace string) storePetSetsNamespacer { + return storePetSetsNamespacer{s.Store, namespace} +} + +// GetPodPetSets returns a list of PetSets managing a pod. Returns an error only if no matching PetSets are found. 
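The typed fetchers are thin get-by-name wrappers around the same Store; a hedged sketch of reading one cached claim (the namespace and claim name are placeholders):

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// describeClaim looks up one PersistentVolumeClaim in the cache by namespace and name.
func describeClaim(store cache.Store) {
	pvcFetcher := cache.StoreToPVCFetcher{Store: store}

	// An error is returned both when the lookup fails and when the claim
	// is simply not present in the store.
	claim, err := pvcFetcher.GetPersistentVolumeClaimInfo("default", "my-claim")
	if err != nil {
		fmt.Printf("claim not cached: %v\n", err)
		return
	}
	fmt.Printf("claim %s/%s phase: %v\n", claim.Namespace, claim.Name, claim.Status.Phase)
}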
+func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, err error) { + var selector labels.Selector + var ps apps.PetSet + + if len(pod.Labels) == 0 { + err = fmt.Errorf("no PetSets found for pod %v because it has no labels", pod.Name) + return + } + + for _, m := range s.Store.List() { + ps = *m.(*apps.PetSet) + if ps.Namespace != pod.Namespace { + continue + } + selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + err = fmt.Errorf("invalid selector: %v", err) + return + } + + // If a PetSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + if len(psList) == 0 { + err = fmt.Errorf("could not find PetSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go new file mode 100644 index 00000000..06c2f611 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go @@ -0,0 +1,86 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// ListFunc knows how to list resources +type ListFunc func(options api.ListOptions) (runtime.Object, error) + +// WatchFunc knows how to watch resources +type WatchFunc func(options api.ListOptions) (watch.Interface, error) + +// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. +// It is a convenience function for users of NewReflector, etc. +// ListFunc and WatchFunc must not be nil +type ListWatch struct { + ListFunc ListFunc + WatchFunc WatchFunc +} + +// Getter interface knows how to access Get method from RESTClient. +type Getter interface { + Get() *restclient.Request +} + +// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. +func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { + listFunc := func(options api.ListOptions) (runtime.Object, error) { + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, api.ParameterCodec). + FieldsSelectorParam(fieldSelector). + Do(). + Get() + } + watchFunc := func(options api.ListOptions) (watch.Interface, error) { + return c.Get(). + Prefix("watch"). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, api.ParameterCodec). + FieldsSelectorParam(fieldSelector). 
+ Watch() + } + return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} +} + +func timeoutFromListOptions(options api.ListOptions) time.Duration { + if options.TimeoutSeconds != nil { + return time.Duration(*options.TimeoutSeconds) * time.Second + } + return 0 +} + +// List a set of apiserver resources +func (lw *ListWatch) List(options api.ListOptions) (runtime.Object, error) { + return lw.ListFunc(options) +} + +// Watch a set of apiserver resources +func (lw *ListWatch) Watch(options api.ListOptions) (watch.Interface, error) { + return lw.WatchFunc(options) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go b/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go new file mode 100644 index 00000000..3a5025a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go @@ -0,0 +1,423 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/url" + "reflect" + "regexp" + goruntime "runtime" + "runtime/debug" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + apierrs "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { + // List should return a list type object; the Items field will be extracted, and the + // ResourceVersion field will be used to start the watch in the right place. + List(options api.ListOptions) (runtime.Object, error) + // Watch should begin a watch at the specified version. + Watch(options api.ListOptions) (watch.Interface, error) +} + +// Reflector watches a specified resource and causes all changes to be reflected in the given store. +type Reflector struct { + // name identifies this reflector. By default it will be a file:line if possible. + name string + + // The type of object we expect to place in the store. + expectedType reflect.Type + // The destination to sync up with the watch source + store Store + // listerWatcher is used to perform lists and watches. + listerWatcher ListerWatcher + // period controls timing between one watch ending and + // the beginning of the next one. 
+ period time.Duration + resyncPeriod time.Duration + // now() returns current time - exposed for testing purposes + now func() time.Time + // nextResync is approximate time of next resync (0 if not scheduled) + nextResync time.Time + // lastSyncResourceVersion is the resource version token last + // observed when doing a sync with the underlying store + // it is thread safe, but not synchronized with the underlying store + lastSyncResourceVersion string + // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion + lastSyncResourceVersionMutex sync.RWMutex +} + +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + // However, it can be modified to avoid periodic resync to break the + // TCP connection. + minWatchTimeout = 5 * time.Minute + // If we are within 'forceResyncThreshold' from the next planned resync + // and are just before issuing Watch(), resync will be forced now. + forceResyncThreshold = 3 * time.Second + // We try to set timeouts for Watch() so that we will finish about + // than 'timeoutThreshold' from next planned periodic resync. + timeoutThreshold = 1 * time.Second +) + +// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector +// The indexer is configured to key on namespace +func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc}) + reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) + return indexer, reflector +} + +// NewReflector creates a new Reflector object which will keep the given store up to +// date with the server's contents for the given resource. Reflector promises to +// only put things in the store that have the type of expectedType, unless expectedType +// is nil. If resyncPeriod is non-zero, then lists will be executed after every +// resyncPeriod, so that you can use reflectors to periodically process everything as +// well as incrementally processing the things that change. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod) +} + +// NewNamedReflector same as NewReflector, but with a specified name for logging +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + r := &Reflector{ + name: name, + listerWatcher: lw, + store: store, + expectedType: reflect.TypeOf(expectedType), + period: time.Second, + resyncPeriod: resyncPeriod, + now: time.Now, + } + return r +} + +// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// call chains to NewReflector, so they'd be low entropy names for reflectors +var internalPackages = []string{"kubernetes/pkg/client/cache/", "kubernetes/pkg/controller/framework/", "/runtime/asm_"} + +// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages +// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging +func getDefaultReflectorName(ignoredPackages ...string) string { + name := "????" 
+ const maxStack = 10 + for i := 1; i < maxStack; i++ { + _, file, line, ok := goruntime.Caller(i) + if !ok { + file, line, ok = extractStackCreator() + if !ok { + break + } + i += maxStack + } + if hasPackage(file, ignoredPackages) { + continue + } + + file = trimPackagePrefix(file) + name = fmt.Sprintf("%s:%d", file, line) + break + } + return name +} + +// hasPackage returns true if the file is in one of the ignored packages. +func hasPackage(file string, ignoredPackages []string) bool { + for _, ignoredPackage := range ignoredPackages { + if strings.Contains(file, ignoredPackage) { + return true + } + } + return false +} + +// trimPackagePrefix reduces dulpicate values off the front of a package name. +func trimPackagePrefix(file string) string { + if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 { + return file[l+len("k8s.io/kubernetes/"):] + } + if l := strings.LastIndex(file, "/src/"); l >= 0 { + return file[l+5:] + } + if l := strings.LastIndex(file, "/pkg/"); l >= 0 { + return file[l+1:] + } + return file +} + +var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) + +// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false +// if the creator cannot be located. +// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 +func extractStackCreator() (string, int, bool) { + stack := debug.Stack() + matches := stackCreator.FindStringSubmatch(string(stack)) + if matches == nil || len(matches) != 4 { + return "", 0, false + } + line, err := strconv.Atoi(matches[3]) + if err != nil { + return "", 0, false + } + return matches[2], line, true +} + +// Run starts a watch and handles watch events. Will restart the watch if it is closed. +// Run starts a goroutine and returns immediately. +func (r *Reflector) Run() { + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + go wait.Until(func() { + if err := r.ListAndWatch(wait.NeverStop); err != nil { + utilruntime.HandleError(err) + } + }, r.period, wait.NeverStop) +} + +// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed. +// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed. +func (r *Reflector) RunUntil(stopCh <-chan struct{}) { + glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + go wait.Until(func() { + if err := r.ListAndWatch(stopCh); err != nil { + utilruntime.HandleError(err) + } + }, r.period, stopCh) +} + +var ( + // nothing will ever be sent down this channel + neverExitWatch <-chan time.Time = make(chan time.Time) + + // Used to indicate that watching stopped so that a resync could happen. + errorResyncRequested = errors.New("resync channel fired") + + // Used to indicate that watching stopped because of a signal from the stop + // channel passed in from a client of the reflector. + errorStopRequested = errors.New("Stop requested") +) + +// resyncChan returns a channel which will receive something when a resync is +// required, and a cleanup function. +func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { + if r.resyncPeriod == 0 { + r.nextResync = time.Time{} + return neverExitWatch, func() bool { return false } + } + // The cleanup function is required: imagine the scenario where watches + // always fail so we end up listing frequently. 
Then, if we don't + // manually stop the timer, we could end up with many timers active + // concurrently. + r.nextResync = r.now().Add(r.resyncPeriod) + t := time.NewTimer(r.resyncPeriod) + return t.C, t.Stop +} + +// ListAndWatch first lists all items and get the resource version at the moment of call, +// and then use the resource version to watch. +// It returns error if ListAndWatch didn't even try to initialize watch. +func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { + glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + var resourceVersion string + resyncCh, cleanup := r.resyncChan() + defer cleanup() + + // Explicitly set "0" as resource version - it's fine for the List() + // to be served from cache and potentially be delayed relative to + // etcd contents. Reflector framework will catch up via Watch() eventually. + options := api.ListOptions{ResourceVersion: "0"} + list, err := r.listerWatcher.List(options) + if err != nil { + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) + } + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) + } + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + } + r.setLastSyncResourceVersion(resourceVersion) + + resyncerrc := make(chan error, 1) + go func() { + for { + select { + case <-resyncCh: + case <-stopCh: + return + } + glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } + cleanup() + resyncCh, cleanup = r.resyncChan() + } + }() + + for { + timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options = api.ListOptions{ + ResourceVersion: resourceVersion, + // We want to avoid situations of hanging watchers. Stop any wachers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timemoutseconds, + } + + w, err := r.listerWatcher.Watch(options) + if err != nil { + switch err { + case io.EOF: + // watch closed normally + case io.ErrUnexpectedEOF: + glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + default: + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) + } + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case wait and resend watch request. + if urlError, ok := err.(*url.Error); ok { + if opError, ok := urlError.Err.(*net.OpError); ok { + if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + time.Sleep(time.Second) + continue + } + } + } + return nil + } + + if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { + if err != errorStopRequested { + glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + } + return nil + } + } +} + +// syncWith replaces the store's items with the given list. 
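Taken together with the store and key helpers added later in this patch (store.go), the usual wiring is a ListWatch feeding a Reflector that keeps a Store current; a hedged sketch, with the client, resource, and resync period as placeholder choices:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/fields"
)

// startPodReflector wires a ListWatch, a Store, and a Reflector together and
// returns the store, which the reflector keeps in sync in the background.
func startPodReflector(client cache.Getter) cache.Store {
	// Watch pods in every namespace with no field filtering.
	lw := cache.NewListWatchFromClient(client, "pods", api.NamespaceAll, fields.Everything())

	// Store keyed by namespace/name via MetaNamespaceKeyFunc (defined in store.go in this patch).
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// Relist every 30 seconds on top of the watch; 0 would disable periodic resync.
	reflector := cache.NewReflector(lw, &api.Pod{}, store, 30*time.Second)
	reflector.Run()

	return store
}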
+func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { + found := make([]interface{}, 0, len(items)) + for _, item := range items { + found = append(found, item) + } + return r.store.Replace(found, resourceVersion) +} + +// watchHandler watches w and keeps *resourceVersion up to date. +func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { + start := time.Now() + eventCount := 0 + + // Stopping the watcher should be idempotent and if we return from this function there's no way + // we're coming back in with the same watch interface. + defer w.Stop() + +loop: + for { + select { + case <-stopCh: + return errorStopRequested + case err := <-errc: + return err + case event, ok := <-w.ResultChan(): + if !ok { + break loop + } + if event.Type == watch.Error { + return apierrs.FromObject(event.Object) + } + if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + continue + } + meta, err := meta.Accessor(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + continue + } + newResourceVersion := meta.GetResourceVersion() + switch event.Type { + case watch.Added: + r.store.Add(event.Object) + case watch.Modified: + r.store.Update(event.Object) + case watch.Deleted: + // TODO: Will any consumers need access to the "last known + // state", which is passed in event.Object? If so, may need + // to change this. + r.store.Delete(event.Object) + default: + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + } + *resourceVersion = newResourceVersion + r.setLastSyncResourceVersion(newResourceVersion) + eventCount++ + } + } + + watchDuration := time.Now().Sub(start) + if watchDuration < 1*time.Second && eventCount == 0 { + glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name) + return errors.New("very short watch") + } + glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + return nil +} + +// LastSyncResourceVersion is the resource version observed when last sync with the underlying store +// The value returned is not synchronized with access to the underlying store and is not thread-safe +func (r *Reflector) LastSyncResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + return r.lastSyncResourceVersion +} + +func (r *Reflector) setLastSyncResourceVersion(v string) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.lastSyncResourceVersion = v +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/store.go b/vendor/k8s.io/kubernetes/pkg/client/cache/store.go new file mode 100644 index 00000000..71115f2c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/store.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api/meta" +) + +// Store is a generic object storage interface. Reflector knows how to watch a server +// and update a store. A generic store is provided, which allows Reflector to be used +// as a local caching system, and an LRU store, which allows Reflector to work like a +// queue of items yet to be processed. +// +// Store makes no assumptions about stored object identity; it is the responsibility +// of a Store implementation to provide a mechanism to correctly key objects and to +// define the contract for obtaining objects by some arbitrary key type. +type Store interface { + Add(obj interface{}) error + Update(obj interface{}) error + Delete(obj interface{}) error + List() []interface{} + ListKeys() []string + Get(obj interface{}) (item interface{}, exists bool, err error) + GetByKey(key string) (item interface{}, exists bool, err error) + + // Replace will delete the contents of the store, using instead the + // given list. Store takes ownership of the list, you should not reference + // it after calling this function. + Replace([]interface{}, string) error + Resync() error +} + +// KeyFunc knows how to make a key from an object. Implementations should be deterministic. +type KeyFunc func(obj interface{}) (string, error) + +// KeyError will be returned any time a KeyFunc gives an error; it includes the object +// at fault. +type KeyError struct { + Obj interface{} + Err error +} + +// Error gives a human-readable description of the error. +func (k KeyError) Error() string { + return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err) +} + +// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for +// the object but not the object itself. +type ExplicitKey string + +// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make +// keys for API objects which implement meta.Interface. +// The key uses the format / unless is empty, then +// it's just . +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func MetaNamespaceKeyFunc(obj interface{}) (string, error) { + if key, ok := obj.(ExplicitKey); ok { + return string(key), nil + } + meta, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("object has no meta: %v", err) + } + if len(meta.GetNamespace()) > 0 { + return meta.GetNamespace() + "/" + meta.GetName(), nil + } + return meta.GetName(), nil +} + +// SplitMetaNamespaceKey returns the namespace and name that +// MetaNamespaceKeyFunc encoded into key. +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { + parts := strings.Split(key, "/") + switch len(parts) { + case 1: + // name only, no namespace + return "", parts[0], nil + case 2: + // name and namespace + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected key format: %q", key) +} + +// cache responsibilities are limited to: +// 1. 
Computing keys for objects via keyFunc +// 2. Invoking methods of a ThreadSafeStorage interface +type cache struct { + // cacheStorage bears the burden of thread safety for the cache + cacheStorage ThreadSafeStore + // keyFunc is used to make the key for objects stored in and retrieved from items, and + // should be deterministic. + keyFunc KeyFunc +} + +var _ Store = &cache{} + +// Add inserts an item into the cache. +func (c *cache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, obj) + return nil +} + +// Update sets an item in the cache to its updated state. +func (c *cache) Update(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Update(key, obj) + return nil +} + +// Delete removes an item from the cache. +func (c *cache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// List returns a list of all the items. +// List is completely threadsafe as long as you treat all items as immutable. +func (c *cache) List() []interface{} { + return c.cacheStorage.List() +} + +// ListKeys returns a list of all the keys of the objects currently +// in the cache. +func (c *cache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// GetIndexers returns the indexers of cache +func (c *cache) GetIndexers() Indexers { + return c.cacheStorage.GetIndexers() +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { + return c.cacheStorage.Index(indexName, obj) +} + +// ListIndexFuncValues returns the list of generated values of an Index func +func (c *cache) ListIndexFuncValues(indexName string) []string { + return c.cacheStorage.ListIndexFuncValues(indexName) +} + +func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) { + return c.cacheStorage.ByIndex(indexName, indexKey) +} + +func (c *cache) AddIndexers(newIndexers Indexers) error { + return c.cacheStorage.AddIndexers(newIndexers) +} + +// Get returns the requested item, or sets exists=false. +// Get is completely threadsafe as long as you treat all items as immutable. +func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return c.GetByKey(key) +} + +// GetByKey returns the request item, or exists=false. +// GetByKey is completely threadsafe as long as you treat all items as immutable. +func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) { + item, exists = c.cacheStorage.Get(key) + return item, exists, nil +} + +// Replace will delete the contents of 'c', using instead the given list. +// 'c' takes ownership of the list, you should not reference the list again +// after calling this function. 
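The two key helpers are intended to round-trip, which is what lets callers hand either an object or a bare ExplicitKey to the same code paths; a small hedged sketch:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

// keyRoundTrip shows how object keys are produced and split back apart.
func keyRoundTrip() {
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "web-0"}}

	// Namespaced objects are keyed as "namespace/name".
	key, err := cache.MetaNamespaceKeyFunc(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // default/web-0

	// A bare key can stand in for the object via ExplicitKey.
	key2, _ := cache.MetaNamespaceKeyFunc(cache.ExplicitKey(key))

	ns, name, _ := cache.SplitMetaNamespaceKey(key2)
	fmt.Println(ns, name) // default web-0
}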
+func (c *cache) Replace(list []interface{}, resourceVersion string) error { + items := map[string]interface{}{} + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = item + } + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync touches all items in the store to force processing +func (c *cache) Resync() error { + return c.cacheStorage.Resync() +} + +// NewStore returns a Store implemented simply with a map and a lock. +func NewStore(keyFunc KeyFunc) Store { + return &cache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + } +} + +// NewIndexer returns an Indexer implemented simply with a map and a lock. +func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer { + return &cache{ + cacheStorage: NewThreadSafeStore(indexers, Indices{}), + keyFunc: keyFunc, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go b/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go new file mode 100644 index 00000000..11077e25 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go @@ -0,0 +1,287 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + + "k8s.io/kubernetes/pkg/util/sets" +) + +// ThreadSafeStore is an interface that allows concurrent access to a storage backend. +// TL;DR caveats: you must not modify anything returned by Get or List as it will break +// the indexing feature in addition to not being thread safe. +// +// The guarantees of thread safety provided by List/Get are only valid if the caller +// treats returned items as read-only. For example, a pointer inserted in the store +// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get` +// on the same key and modify the pointer in a non-thread-safe way. Also note that +// modifying objects stored by the indexers (if any) will *not* automatically lead +// to a re-index. So it's not a good idea to directly modify the objects returned by +// Get/List, in general. +type ThreadSafeStore interface { + Add(key string, obj interface{}) + Update(key string, obj interface{}) + Delete(key string) + Get(key string) (item interface{}, exists bool) + List() []interface{} + ListKeys() []string + Replace(map[string]interface{}, string) + Index(indexName string, obj interface{}) ([]interface{}, error) + ListIndexFuncValues(name string) []string + ByIndex(indexName, indexKey string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. 
+ AddIndexers(newIndexers Indexers) error + Resync() error +} + +// threadSafeMap implements ThreadSafeStore +type threadSafeMap struct { + lock sync.RWMutex + items map[string]interface{} + + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (c *threadSafeMap) Add(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Update(key string, obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + oldObject := c.items[key] + c.items[key] = obj + c.updateIndices(oldObject, obj, key) +} + +func (c *threadSafeMap) Delete(key string) { + c.lock.Lock() + defer c.lock.Unlock() + if obj, exists := c.items[key]; exists { + c.deleteFromIndices(obj, key) + delete(c.items, key) + } +} + +func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) { + c.lock.RLock() + defer c.lock.RUnlock() + item, exists = c.items[key] + return item, exists +} + +func (c *threadSafeMap) List() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]interface{}, 0, len(c.items)) + for _, item := range c.items { + list = append(list, item) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the threadSafeMap. +func (c *threadSafeMap) ListKeys() []string { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]string, 0, len(c.items)) + for key := range c.items { + list = append(list, key) + } + return list +} + +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.indices = Indices{} + for key, item := range c.items { + c.updateIndices(nil, item, key) + } +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexKeys, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := c.indices[indexName] + + // need to de-dupe the return list. Since multiple keys are allowed, this can happen. 
+ returnKeySet := sets.String{} + for _, indexKey := range indexKeys { + set := index[indexKey] + for _, key := range set.List() { + returnKeySet.Insert(key) + } + } + + list := make([]interface{}, 0, returnKeySet.Len()) + for absoluteKey := range returnKeySet { + list = append(list, c.items[absoluteKey]) + } + return list, nil +} + +// ByIndex returns a list of items that match an exact value on the index function +func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + indexFunc := c.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := c.indices[indexName] + + set := index[indexKey] + list := make([]interface{}, 0, set.Len()) + for _, key := range set.List() { + list = append(list, c.items[key]) + } + + return list, nil +} + +func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + + index := c.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (c *threadSafeMap) GetIndexers() Indexers { + return c.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + oldKeys := sets.StringKeySet(c.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) { + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + c.indexers[k] = v + } + return nil +} + +// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error { + // if we got an old object, we need to remove it before we add it again + if oldObj != nil { + c.deleteFromIndices(oldObj, key) + } + for name, indexFunc := range c.indexers { + indexValues, err := indexFunc(newObj) + if err != nil { + return err + } + index := c.indices[name] + if index == nil { + index = Index{} + c.indices[name] = index + } + + for _, indexValue := range indexValues { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) + } + } + return nil +} + +// deleteFromIndices removes the object from each of the managed indexes +// it is intended to be called from a function that already has a lock on the cache +func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { + for name, indexFunc := range c.indexers { + indexValues, err := indexFunc(obj) + if err != nil { + return err + } + + index := c.indices[name] + for _, indexValue := range indexValues { + if index != nil { + set := index[indexValue] + if set != nil { + set.Delete(key) + } + } + } + } + return nil +} + +func (c *threadSafeMap) Resync() error { + // Nothing to do + return nil +} + +func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { + return &threadSafeMap{ + items: map[string]interface{}{}, + indexers: indexers, + indices: indices, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go b/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go new file mode 100644 index 00000000..4a8a4500 
--- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +// UndeltaStore listens to incremental updates and sends complete state on every change. +// It implements the Store interface so that it can receive a stream of mirrored objects +// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change +// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc. +// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results +// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values. +// PushFunc should be thread safe. +type UndeltaStore struct { + Store + PushFunc func([]interface{}) +} + +// Assert that it implements the Store interface. +var _ Store = &UndeltaStore{} + +// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods. +// In the functions below, the lock gets released and reacquired betweend the {Add,Delete,etc} +// and the List. So, the following can happen, resulting in two identical calls to PushFunc. +// time thread 1 thread 2 +// 0 UndeltaStore.Add(a) +// 1 UndeltaStore.Add(b) +// 2 Store.Add(a) +// 3 Store.Add(b) +// 4 Store.List() -> [a,b] +// 5 Store.List() -> [a,b] + +func (u *UndeltaStore) Add(obj interface{}) error { + if err := u.Store.Add(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Update(obj interface{}) error { + if err := u.Store.Update(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Delete(obj interface{}) error { + if err := u.Store.Delete(obj); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error { + if err := u.Store.Replace(list, resourceVersion); err != nil { + return err + } + u.PushFunc(u.Store.List()) + return nil +} + +// NewUndeltaStore returns an UndeltaStore implemented with a Store. +func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore { + return &UndeltaStore{ + Store: NewStore(keyFunc), + PushFunc: pushFunc, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go new file mode 100644 index 00000000..8b958a58 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go @@ -0,0 +1,158 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalclientset + +import ( + "github.com/golang/glog" + unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + unversionedrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() unversionedcore.CoreInterface + Extensions() unversionedextensions.ExtensionsInterface + Autoscaling() unversionedautoscaling.AutoscalingInterface + Batch() unversionedbatch.BatchInterface + Rbac() unversionedrbac.RbacInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *unversionedcore.CoreClient + *unversionedextensions.ExtensionsClient + *unversionedautoscaling.AutoscalingClient + *unversionedbatch.BatchClient + *unversionedrbac.RbacClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() unversionedcore.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Extensions retrieves the ExtensionsClient +func (c *Clientset) Extensions() unversionedextensions.ExtensionsInterface { + if c == nil { + return nil + } + return c.ExtensionsClient +} + +// Autoscaling retrieves the AutoscalingClient +func (c *Clientset) Autoscaling() unversionedautoscaling.AutoscalingInterface { + if c == nil { + return nil + } + return c.AutoscalingClient +} + +// Batch retrieves the BatchClient +func (c *Clientset) Batch() unversionedbatch.BatchInterface { + if c == nil { + return nil + } + return c.BatchClient +} + +// Rbac retrieves the RbacClient +func (c *Clientset) Rbac() unversionedrbac.RbacInterface { + if c == nil { + return nil + } + return c.RbacClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.AutoscalingClient, err = unversionedautoscaling.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.BatchClient, err = unversionedbatch.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.RbacClient, err = unversionedrbac.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) + clientset.ExtensionsClient = unversionedextensions.NewForConfigOrDie(c) + clientset.AutoscalingClient = unversionedautoscaling.NewForConfigOrDie(c) + clientset.BatchClient = unversionedbatch.NewForConfigOrDie(c) + clientset.RbacClient = unversionedrbac.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = unversionedcore.New(c) + clientset.ExtensionsClient = unversionedextensions.New(c) + clientset.AutoscalingClient = unversionedautoscaling.New(c) + clientset.BatchClient = unversionedbatch.New(c) + clientset.RbacClient = unversionedrbac.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go new file mode 100644 index 00000000..3934caa4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated clientset. 
+package internalclientset diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go new file mode 100644 index 00000000..8bdbe2e6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalclientset + +// These imports are the API groups the client will support. +import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go new file mode 100644 index 00000000..752b5d55 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type AutoscalingInterface interface { + GetRESTClient() *restclient.RESTClient + HorizontalPodAutoscalersGetter +} + +// AutoscalingClient is used to interact with features provided by the Autoscaling group. 
+type AutoscalingClient struct { + *restclient.RESTClient +} + +func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingClient for the given config. +func NewForConfig(c *restclient.Config) (*AutoscalingClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingClient{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *AutoscalingClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingClient for the given RESTClient. +func New(c *restclient.RESTClient) *AutoscalingClient { + return &AutoscalingClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if autoscaling group is not registered, return an error + g, err := registered.Group("autoscaling") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go new file mode 100644 index 00000000..47517b64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. 
+package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go new file mode 100644 index 00000000..39324902 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go new file mode 100644 index 00000000..ae185ad7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + watch "k8s.io/kubernetes/pkg/watch" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
+type HorizontalPodAutoscalerInterface interface { + Create(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Update(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + UpdateStatus(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) + List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client *AutoscalingClient + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
+func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + result = &autoscaling.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go new file mode 100644 index 00000000..83d9d749 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type BatchInterface interface { + GetRESTClient() *restclient.RESTClient + JobsGetter + ScheduledJobsGetter +} + +// BatchClient is used to interact with features provided by the Batch group. +type BatchClient struct { + *restclient.RESTClient +} + +func (c *BatchClient) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { + return newScheduledJobs(c, namespace) +} + +// NewForConfig creates a new BatchClient for the given config. +func NewForConfig(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +// NewForConfigOrDie creates a new BatchClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *BatchClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *BatchClient { + return &BatchClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if batch group is not registered, return an error + g, err := registered.Group("batch") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go new file mode 100644 index 00000000..47517b64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go new file mode 100644 index 00000000..f876ef63 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +type JobExpansion interface{} + +type ScheduledJobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go new file mode 100644 index 00000000..680c5065 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + batch "k8s.io/kubernetes/pkg/apis/batch" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*batch.Job) (*batch.Job, error) + Update(*batch.Job) (*batch.Job, error) + UpdateStatus(*batch.Job) (*batch.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*batch.Job, error) + List(opts api.ListOptions) (*batch.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *BatchClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go new file mode 100644 index 00000000..2675d11c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + batch "k8s.io/kubernetes/pkg/apis/batch" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ScheduledJobsGetter has a method to return a ScheduledJobInterface. +// A group's client should implement this interface. +type ScheduledJobsGetter interface { + ScheduledJobs(namespace string) ScheduledJobInterface +} + +// ScheduledJobInterface has methods to work with ScheduledJob resources. 
+type ScheduledJobInterface interface { + Create(*batch.ScheduledJob) (*batch.ScheduledJob, error) + Update(*batch.ScheduledJob) (*batch.ScheduledJob, error) + UpdateStatus(*batch.ScheduledJob) (*batch.ScheduledJob, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*batch.ScheduledJob, error) + List(opts api.ListOptions) (*batch.ScheduledJobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ScheduledJobExpansion +} + +// scheduledJobs implements ScheduledJobInterface +type scheduledJobs struct { + client *BatchClient + ns string +} + +// newScheduledJobs returns a ScheduledJobs +func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { + return &scheduledJobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a scheduledJob and creates it. Returns the server's representation of the scheduledJob, and an error, if there is any. +func (c *scheduledJobs) Create(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("scheduledjobs"). + Body(scheduledJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a scheduledJob and updates it. Returns the server's representation of the scheduledJob, and an error, if there is any. +func (c *scheduledJobs) Update(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(scheduledJob.Name). + Body(scheduledJob). + Do(). + Into(result) + return +} + +func (c *scheduledJobs) UpdateStatus(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(scheduledJob.Name). + SubResource("status"). + Body(scheduledJob). + Do(). + Into(result) + return +} + +// Delete takes name of the scheduledJob and deletes it. Returns an error if one occurs. +func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *scheduledJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the scheduledJob, and returns the corresponding scheduledJob object, and an error if there is any. +func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledjobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ScheduledJobs that match those selectors. +func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { + result = &batch.ScheduledJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested scheduledJobs. 
+func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("scheduledjobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go new file mode 100644 index 00000000..0ef0667d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go @@ -0,0 +1,126 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. +type ComponentStatusInterface interface { + Create(*api.ComponentStatus) (*api.ComponentStatus, error) + Update(*api.ComponentStatus) (*api.ComponentStatus, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.ComponentStatus, error) + List(opts api.ListOptions) (*api.ComponentStatusList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client *CoreClient +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreClient) *componentStatuses { + return &componentStatuses{ + client: c, + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { + result = &api.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { + result = &api.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. 
+func (c *componentStatuses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { + result = &api.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { + result = &api.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("componentstatuses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go new file mode 100644 index 00000000..b43e53d6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. 
+type ConfigMapInterface interface { + Create(*api.ConfigMap) (*api.ConfigMap, error) + Update(*api.ConfigMap) (*api.ConfigMap, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.ConfigMap, error) + List(opts api.ListOptions) (*api.ConfigMapList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client *CoreClient + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreClient, namespace string) *configMaps { + return &configMaps{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { + result = &api.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { + result = &api.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string) (result *api.ConfigMap, err error) { + result = &api.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { + result = &api.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go new file mode 100644 index 00000000..53368d24 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go @@ -0,0 +1,181 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + SecurityContextConstraintsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreClient) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreClient) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreClient) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreClient) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreClient) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreClient) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreClient) SecurityContextConstraints() SecurityContextConstraintsInterface { + return newSecurityContextConstraints(c) +} + +func (c *CoreClient) 
Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/api" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go new file mode 100644 index 00000000..47517b64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go new file mode 100644 index 00000000..78e2a087 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. +type EndpointsInterface interface { + Create(*api.Endpoints) (*api.Endpoints, error) + Update(*api.Endpoints) (*api.Endpoints, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Endpoints, error) + List(opts api.ListOptions) (*api.EndpointsList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client *CoreClient + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreClient, namespace string) *endpoints { + return &endpoints{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { + result = &api.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { + result = &api.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { + result = &api.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. 
+func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { + result = &api.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go new file mode 100644 index 00000000..5627690a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*api.Event) (*api.Event, error) + Update(*api.Event) (*api.Event, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Event, error) + List(opts api.ListOptions) (*api.EventList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client *CoreClient + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreClient, namespace string) *events { + return &events{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *api.Event) (result *api.Event, err error) { + result = &api.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *api.Event) (result *api.Event, err error) { + result = &api.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string) (result *api.Event, err error) { + result = &api.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts api.ListOptions) (result *api.EventList, err error) { + result = &api.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go new file mode 100644 index 00000000..abdf89aa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go @@ -0,0 +1,157 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *api.Event) (*api.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *api.Event) (*api.Event, error) + Patch(event *api.Event, data []byte) (*api.Event, error) + // Search finds events about the specified object + Search(objOrRef runtime.Object) (*api.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. 
+ GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &api.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { + result := &api.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// Patch modifies an existing event. It returns the copy of the event that the server returns, or an +// error. The namespace and name of the target event is deduced from the incompleteEvent. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) { + result := &api.Event{} + err := e.client.Patch(api.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. +func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { + ref, err := api.GetReference(objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(api.ListOptions{FieldSelector: fieldSelector}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. 
+func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) { + return e.Interface.Patch(event, data) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go new file mode 100644 index 00000000..546f8e7a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type ComponentStatusExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} + +type ConfigMapExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go new file mode 100644 index 00000000..86cc9b07 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*api.LimitRange) (*api.LimitRange, error) + Update(*api.LimitRange) (*api.LimitRange, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.LimitRange, error) + List(opts api.ListOptions) (*api.LimitRangeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client *CoreClient + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreClient, namespace string) *limitRanges { + return &limitRanges{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). + Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.client.Put(). + Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { + result = &api.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go new file mode 100644 index 00000000..c1c8b450 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go @@ -0,0 +1,139 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(*api.Namespace) (*api.Namespace, error) + Update(*api.Namespace) (*api.Namespace, error) + UpdateStatus(*api.Namespace) (*api.Namespace, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Namespace, error) + List(opts api.ListOptions) (*api.NamespaceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client *CoreClient +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreClient) *namespaces { + return &namespaces{ + client: c, + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.client.Put(). 
+ Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +func (c *namespaces) UpdateStatus(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { + result = &api.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go new file mode 100644 index 00000000..8f47aec4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import "k8s.io/kubernetes/pkg/api" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *api.Namespace) (*api.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. 
+func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go new file mode 100644 index 00000000..b0c53ef1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go @@ -0,0 +1,139 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(*api.Node) (*api.Node, error) + Update(*api.Node) (*api.Node, error) + UpdateStatus(*api.Node) (*api.Node, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Node, error) + List(opts api.ListOptions) (*api.NodeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client *CoreClient +} + +// newNodes returns a Nodes +func newNodes(c *CoreClient) *nodes { + return &nodes{ + client: c, + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *api.Node) (result *api.Node, err error) { + result = &api.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *api.Node) (result *api.Node, err error) { + result = &api.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +func (c *nodes) UpdateStatus(node *api.Node) (result *api.Node, err error) { + result = &api.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string) (result *api.Node, err error) { + result = &api.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts api.ListOptions) (result *api.NodeList, err error) { + result = &api.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("nodes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go new file mode 100644 index 00000000..3146cdb3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import "k8s.io/kubernetes/pkg/api" + +// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. +type NodeExpansion interface { + // PatchStatus modifies the status of an existing node. It returns the copy + // of the node that the server returns, or an error. + PatchStatus(nodeName string, data []byte) (*api.Node, error) +} + +// PatchStatus modifies the status of an existing node. It returns the copy of +// the node that the server returns, or an error. +func (c *nodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) { + result := &api.Node{} + err := c.client.Patch(api.StrategicMergePatchType). + Resource("nodes"). + Name(nodeName). + SubResource("status"). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go new file mode 100644 index 00000000..6b4d0f01 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go @@ -0,0 +1,139 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*api.PersistentVolume) (*api.PersistentVolume, error) + Update(*api.PersistentVolume) (*api.PersistentVolume, error) + UpdateStatus(*api.PersistentVolume) (*api.PersistentVolume, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.PersistentVolume, error) + List(opts api.ListOptions) (*api.PersistentVolumeList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client *CoreClient +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreClient) *persistentVolumes { + return &persistentVolumes{ + client: c, + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +func (c *persistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *persistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { + result = &api.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("persistentvolumes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go new file mode 100644 index 00000000..2f5b1743 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. +// A group's client should implement this interface. +type PersistentVolumeClaimsGetter interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
+type PersistentVolumeClaimInterface interface { + Create(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + Update(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + UpdateStatus(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.PersistentVolumeClaim, error) + List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PersistentVolumeClaimExpansion +} + +// persistentVolumeClaims implements PersistentVolumeClaimInterface +type persistentVolumeClaims struct { + client *CoreClient + ns string +} + +// newPersistentVolumeClaims returns a PersistentVolumeClaims +func newPersistentVolumeClaims(c *CoreClient, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + SubResource("status"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. +func (c *persistentVolumeClaims) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumeClaims) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { + result = &api.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go new file mode 100644 index 00000000..1cdfc8e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + Create(*api.Pod) (*api.Pod, error) + Update(*api.Pod) (*api.Pod, error) + UpdateStatus(*api.Pod) (*api.Pod, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Pod, error) + List(opts api.ListOptions) (*api.PodList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client *CoreClient + ns string +} + +// newPods returns a Pods +func newPods(c *CoreClient, namespace string) *pods { + return &pods{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). 
+ Into(result) + return +} + +func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) { + result = &api.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go new file mode 100644 index 00000000..8ebd29d3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. +type PodExpansion interface { + Bind(binding *api.Binding) error + GetLogs(name string, opts *api.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). 
+func (c *pods) Bind(binding *api.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go new file mode 100644 index 00000000..cccef29f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*api.PodTemplate) (*api.PodTemplate, error) + Update(*api.PodTemplate) (*api.PodTemplate, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.PodTemplate, error) + List(opts api.ListOptions) (*api.PodTemplateList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client *CoreClient + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreClient, namespace string) *podTemplates { + return &podTemplates{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { + result = &api.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go new file mode 100644 index 00000000..6f9f0662 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. +type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. 
+type ReplicationControllerInterface interface { + Create(*api.ReplicationController) (*api.ReplicationController, error) + Update(*api.ReplicationController) (*api.ReplicationController, error) + UpdateStatus(*api.ReplicationController) (*api.ReplicationController, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.ReplicationController, error) + List(opts api.ListOptions) (*api.ReplicationControllerList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client *CoreClient + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { + return &replicationControllers{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +func (c *replicationControllers) UpdateStatus(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { + result = &api.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go new file mode 100644 index 00000000..2d0da73f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. +type ResourceQuotaInterface interface { + Create(*api.ResourceQuota) (*api.ResourceQuota, error) + Update(*api.ResourceQuota) (*api.ResourceQuota, error) + UpdateStatus(*api.ResourceQuota) (*api.ResourceQuota, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.ResourceQuota, error) + List(opts api.ListOptions) (*api.ResourceQuotaList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client *CoreClient + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { + result = &api.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go new file mode 100644 index 00000000..101fbdb5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. +type SecretInterface interface { + Create(*api.Secret) (*api.Secret, error) + Update(*api.Secret) (*api.Secret, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Secret, error) + List(opts api.ListOptions) (*api.SecretList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client *CoreClient + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreClient, namespace string) *secrets { + return &secrets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { + result = &api.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { + result = &api.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string) (result *api.Secret, err error) { + result = &api.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts api.ListOptions) (result *api.SecretList, err error) { + result = &api.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints.go new file mode 100644 index 00000000..b98ac11a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints.go @@ -0,0 +1,126 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// SecurityContextConstraintsGetter has a method to return a SecurityContextConstraintsInterface. +// A group's client should implement this interface. +type SecurityContextConstraintsGetter interface { + SecurityContextConstraints() SecurityContextConstraintsInterface +} + +// SecurityContextConstraintsInterface has methods to work with SecurityContextConstraints resources. +type SecurityContextConstraintsInterface interface { + Create(*api.SecurityContextConstraints) (*api.SecurityContextConstraints, error) + Update(*api.SecurityContextConstraints) (*api.SecurityContextConstraints, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.SecurityContextConstraints, error) + List(opts api.ListOptions) (*api.SecurityContextConstraintsList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + SecurityContextConstraintsExpansion +} + +// securityContextConstraints implements SecurityContextConstraintsInterface +type securityContextConstraints struct { + client *CoreClient +} + +// newSecurityContextConstraints returns a SecurityContextConstraints +func newSecurityContextConstraints(c *CoreClient) *securityContextConstraints { + return &securityContextConstraints{ + client: c, + } +} + +// Create takes the representation of a securityContextConstraints and creates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any. +func (c *securityContextConstraints) Create(securityContextConstraints *api.SecurityContextConstraints) (result *api.SecurityContextConstraints, err error) { + result = &api.SecurityContextConstraints{} + err = c.client.Post(). + Resource("securitycontextconstraints"). + Body(securityContextConstraints). + Do(). + Into(result) + return +} + +// Update takes the representation of a securityContextConstraints and updates it. Returns the server's representation of the securityContextConstraints, and an error, if there is any. +func (c *securityContextConstraints) Update(securityContextConstraints *api.SecurityContextConstraints) (result *api.SecurityContextConstraints, err error) { + result = &api.SecurityContextConstraints{} + err = c.client.Put(). + Resource("securitycontextconstraints"). 
+ Name(securityContextConstraints.Name). + Body(securityContextConstraints). + Do(). + Into(result) + return +} + +// Delete takes name of the securityContextConstraints and deletes it. Returns an error if one occurs. +func (c *securityContextConstraints) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("securitycontextconstraints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *securityContextConstraints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("securitycontextconstraints"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the securityContextConstraints, and returns the corresponding securityContextConstraints object, and an error if there is any. +func (c *securityContextConstraints) Get(name string) (result *api.SecurityContextConstraints, err error) { + result = &api.SecurityContextConstraints{} + err = c.client.Get(). + Resource("securitycontextconstraints"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of SecurityContextConstraints that match those selectors. +func (c *securityContextConstraints) List(opts api.ListOptions) (result *api.SecurityContextConstraintsList, err error) { + result = &api.SecurityContextConstraintsList{} + err = c.client.Get(). + Resource("securitycontextconstraints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested securityContextConstraints. +func (c *securityContextConstraints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("securitycontextconstraints"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints_expansion.go new file mode 100644 index 00000000..91503f65 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/securitycontextconstraints_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import "k8s.io/kubernetes/pkg/api" + +// The SecurityContextConstraintsExpansion interface allows manually adding extra methods to the +// SecurityContextConstraintsInterface. +type SecurityContextConstraintsExpansion interface { + // PatchStatus modifies the status of an existing security context constraint. It returns the copy + // of the node that the server returns, or an error. 
+ PatchStatus(name string, data []byte) (*api.SecurityContextConstraints, error) +} + +// PatchStatus modifies the status of an existing security context constraint. It returns the copy of +// the security context constraint that the server returns, or an error. +func (c *securityContextConstraints) PatchStatus(nodeName string, data []byte) (*api.SecurityContextConstraints, error) { + result := &api.SecurityContextConstraints{} + err := c.client.Patch(api.StrategicMergePatchType). + Resource("securitycontextconstraints"). + Name(nodeName). + SubResource("status"). + Body(data). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go new file mode 100644 index 00000000..006f601c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go @@ -0,0 +1,149 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*api.Service) (*api.Service, error) + Update(*api.Service) (*api.Service, error) + UpdateStatus(*api.Service) (*api.Service, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.Service, error) + List(opts api.ListOptions) (*api.ServiceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client *CoreClient + ns string +} + +// newServices returns a Services +func newServices(c *CoreClient, namespace string) *services { + return &services{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). 
+ Into(result) + return +} + +func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string) (result *api.Service, err error) { + result = &api.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { + result = &api.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go new file mode 100644 index 00000000..89266e6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util/net" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). 
+ Prefix("proxy"). + Namespace(c.ns). + Resource("services"). + Name(net.JoinSchemeNamePort(scheme, name, port)). + Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go new file mode 100644 index 00000000..65f7df26 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*api.ServiceAccount) (*api.ServiceAccount, error) + Update(*api.ServiceAccount) (*api.ServiceAccount, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*api.ServiceAccount, error) + List(opts api.ListOptions) (*api.ServiceAccountList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client *CoreClient + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { + result = &api.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { + result = &api.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. 
+func (c *serviceAccounts) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string) (result *api.ServiceAccount, err error) { + result = &api.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) { + result = &api.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go new file mode 100644 index 00000000..96dae583 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) + Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) + UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.DaemonSet, error) + List(opts api.ListOptions) (*extensions.DaemonSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client *ExtensionsClient + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { + return &daemonSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { + result = &extensions.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. 
+func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go new file mode 100644 index 00000000..3b995c02 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*extensions.Deployment) (*extensions.Deployment, error) + Update(*extensions.Deployment) (*extensions.Deployment, error) + UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.Deployment, error) + List(opts api.ListOptions) (*extensions.DeploymentList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client *ExtensionsClient + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsClient, namespace string) *deployments { + return &deployments{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). 
+ Into(result) + return +} + +func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { + result = &extensions.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go new file mode 100644 index 00000000..9969aecc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import "k8s.io/kubernetes/pkg/apis/extensions" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*extensions.DeploymentRollback) error +} + +// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. 
+func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go new file mode 100644 index 00000000..47517b64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. +package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go new file mode 100644 index 00000000..9b9f4749 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go @@ -0,0 +1,131 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type ExtensionsInterface interface { + GetRESTClient() *restclient.RESTClient + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsClient is used to interact with features provided by the Extensions group. 
+type ExtensionsClient struct { + *restclient.RESTClient +} + +func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsClient for the given config. +func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsClient{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsClient for the given RESTClient. +func New(c *restclient.RESTClient) *ExtensionsClient { + return &ExtensionsClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if extensions group is not registered, return an error + g, err := registered.Group("extensions") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ExtensionsClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go new file mode 100644 index 00000000..7a199945 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type DaemonSetExpansion interface{} + +type HorizontalPodAutoscalerExpansion interface{} + +type IngressExpansion interface{} + +type JobExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ThirdPartyResourceExpansion interface{} + +type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go new file mode 100644 index 00000000..a9d950ea --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(*extensions.Ingress) (*extensions.Ingress, error) + Update(*extensions.Ingress) (*extensions.Ingress, error) + UpdateStatus(*extensions.Ingress) (*extensions.Ingress, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.Ingress, error) + List(opts api.ListOptions) (*extensions.IngressList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client *ExtensionsClient + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *ExtensionsClient, namespace string) *ingresses { + return &ingresses{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. 
Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +func (c *ingresses) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { + result = &extensions.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go new file mode 100644 index 00000000..4ae3f6ca --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + batch "k8s.io/kubernetes/pkg/apis/batch" + watch "k8s.io/kubernetes/pkg/watch" +) + +// JobsGetter has a method to return a JobInterface. 
+// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*batch.Job) (*batch.Job, error) + Update(*batch.Job) (*batch.Job, error) + UpdateStatus(*batch.Job) (*batch.Job, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*batch.Job, error) + List(opts api.ListOptions) (*batch.JobList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client *ExtensionsClient + ns string +} + +// newJobs returns a Jobs +func newJobs(c *ExtensionsClient, namespace string) *jobs { + return &jobs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go new file mode 100644 index 00000000..06a7908f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.PodSecurityPolicy, error) + List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client *ExtensionsClient +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsClient) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c, + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. 
+func (c *podSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { + result = &extensions.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go new file mode 100644 index 00000000..6257fd89 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go @@ -0,0 +1,150 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) + Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) + UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.ReplicaSet, error) + List(opts api.ListOptions) (*extensions.ReplicaSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client *ExtensionsClient + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { + return &replicaSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { + result = &extensions.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. 
+func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go new file mode 100644 index 00000000..7e54bc34 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client *ExtensionsClient + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsClient, namespace string) *scales { + return &scales{ + client: c, + ns: namespace, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go new file mode 100644 index 00000000..61a77f26 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*extensions.Scale, error) + Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. 
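The ScaleExpansion methods in this file drive the "scale" subresource by kind and name. Below is a minimal, hypothetical sketch of resizing a workload with them; it is not part of the vendored files, assumes a ScalesGetter is already available, and assumes extensions.ScaleSpec carries a Replicas field (that type is not shown in this hunk).

package example

import (
	extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
)

// bumpReplicaSet adds one replica to a ReplicaSet via the scale subresource.
func bumpReplicaSet(c extensionsclient.ScalesGetter, namespace, name string) error {
	// Get currently takes a bare kind string; the TODO in the vendored code
	// notes that this should eventually be an unambiguous kind.
	scale, err := c.Scales(namespace).Get("ReplicaSet", name)
	if err != nil {
		return err
	}
	scale.Spec.Replicas++ // Spec.Replicas is assumed from extensions.ScaleSpec
	// Update PUTs the modified Scale back to the "scale" subresource.
	_, err = c.Scales(namespace).Update("ReplicaSet", scale)
	return err
}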
+func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { + result = &extensions.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { + result = &extensions.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go new file mode 100644 index 00000000..a64ffb62 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. +// A group's client should implement this interface. +type ThirdPartyResourcesGetter interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. +type ThirdPartyResourceInterface interface { + Create(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) + Update(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*extensions.ThirdPartyResource, error) + List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ThirdPartyResourceExpansion +} + +// thirdPartyResources implements ThirdPartyResourceInterface +type thirdPartyResources struct { + client *ExtensionsClient +} + +// newThirdPartyResources returns a ThirdPartyResources +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { + return &thirdPartyResources{ + client: c, + } +} + +// Create takes the representation of a thirdPartyResource and creates it. 
Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.client.Post(). + Resource("thirdpartyresources"). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.client.Put(). + Resource("thirdpartyresources"). + Name(thirdPartyResource.Name). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. +func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. +func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.client.Get(). + Resource("thirdpartyresources"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. +func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { + result = &extensions.ThirdPartyResourceList{} + err = c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("thirdpartyresources"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go new file mode 100644 index 00000000..5d0b3912 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*rbac.ClusterRole) (*rbac.ClusterRole, error) + Update(*rbac.ClusterRole) (*rbac.ClusterRole, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.ClusterRole, error) + List(opts api.ListOptions) (*rbac.ClusterRoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client *RbacClient +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacClient) *clusterRoles { + return &clusterRoles{ + client: c, + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
+func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { + result = &rbac.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go new file mode 100644 index 00000000..f2102592 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + Create(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Update(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.ClusterRoleBinding, error) + List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client *RbacClient +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c, + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
+func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + result = &rbac.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go new file mode 100644 index 00000000..47517b64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with the default arguments. + +// This package has the automatically generated typed clients. 
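Taken together, the generated RBAC files below expose a typed client built from a restclient.Config. The sketch below shows one plausible way to wire it up and list ClusterRoles; it is illustrative only (how the Config itself is produced, e.g. from a kubeconfig, is assumed), and it uses only constructors and methods defined in this package.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	rbacclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// listClusterRoles builds a RbacClient from a rest config and prints every
// ClusterRole name. NewForConfig copies the config, applies the rbac group
// defaults and constructs the underlying RESTClient.
func listClusterRoles(cfg *restclient.Config) error {
	client, err := rbacclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	roles, err := client.ClusterRoles().List(api.ListOptions{})
	if err != nil {
		return err
	}
	for _, r := range roles.Items {
		fmt.Println(r.Name)
	}
	return nil
}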
+package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go new file mode 100644 index 00000000..a3b9c689 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go new file mode 100644 index 00000000..4d67337c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go @@ -0,0 +1,116 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" +) + +type RbacInterface interface { + GetRESTClient() *restclient.RESTClient + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacClient is used to interact with features provided by the Rbac group. +type RbacClient struct { + *restclient.RESTClient +} + +func (c *RbacClient) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacClient) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacClient for the given config. 
+func NewForConfig(c *restclient.Config) (*RbacClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacClient{client}, nil +} + +// NewForConfigOrDie creates a new RbacClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *RbacClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacClient for the given RESTClient. +func New(c *restclient.RESTClient) *RbacClient { + return &RbacClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if rbac group is not registered, return an error + g, err := registered.Group("rbac.authorization.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = api.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go new file mode 100644 index 00000000..68e7ebe9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. 
+type RoleInterface interface { + Create(*rbac.Role) (*rbac.Role, error) + Update(*rbac.Role) (*rbac.Role, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.Role, error) + List(opts api.ListOptions) (*rbac.RoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client *RbacClient + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacClient, namespace string) *roles { + return &roles{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { + result = &rbac.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go new file mode 100644 index 00000000..c73318c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + api "k8s.io/kubernetes/pkg/api" + rbac "k8s.io/kubernetes/pkg/apis/rbac" + watch "k8s.io/kubernetes/pkg/watch" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*rbac.RoleBinding) (*rbac.RoleBinding, error) + Update(*rbac.RoleBinding) (*rbac.RoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*rbac.RoleBinding, error) + List(opts api.ListOptions) (*rbac.RoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client *RbacClient + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacClient, namespace string) *roleBindings { + return &roleBindings{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). 
+ Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { + result = &rbac.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go new file mode 100644 index 00000000..efa66fc8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides utilities for registering client metrics to Prometheus. +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const restClientSubsystem = "rest_client" + +var ( + // RequestLatency is a Prometheus Summary metric type partitioned by + // "verb" and "url" labels. It is used for the rest client latency metrics. + RequestLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: restClientSubsystem, + Name: "request_latency_microseconds", + Help: "Request latency in microseconds. Broken down by verb and URL", + MaxAge: time.Hour, + }, + []string{"verb", "url"}, + ) + + RequestResult = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: restClientSubsystem, + Name: "request_status_codes", + Help: "Number of http requests, partitioned by metadata", + }, + []string{"code", "method", "host"}, + ) +) + +var registerMetrics sync.Once + +// Register registers all metrics to Prometheus with +// respect to the RequestLatency. +func Register() { + // Register the metrics. + registerMetrics.Do(func() { + prometheus.MustRegister(RequestLatency) + prometheus.MustRegister(RequestResult) + }) +} + +// Calculates the time since the specified start in microseconds. +func SinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go b/vendor/k8s.io/kubernetes/pkg/client/record/doc.go new file mode 100644 index 00000000..d9551543 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/record/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package record has all client logic for recording and reporting events. +package record diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/event.go b/vendor/k8s.io/kubernetes/pkg/client/record/event.go new file mode 100644 index 00000000..47cbe3ec --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/record/event.go @@ -0,0 +1,315 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" + "net/http" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *api.Event) (*api.Event, error) + Update(event *api.Event) (*api.Event, error) + Patch(oldEvent *api.Event, data []byte) (*api.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. 
+ PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*api.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(source api.EventSource) EventRecorder +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(util.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *api.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. 
+ if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +func isKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. +func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *api.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *api.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for { + watchEvent, open := <-watcher.ResultChan() + if !open { + return + } + event, ok := watchEvent.Object.(*api.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. 
+ continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. +func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder { + return &recorderImpl{source, eventBroadcaster.Broadcaster, util.RealClock{}} +} + +type recorderImpl struct { + source api.EventSource + *watch.Broadcaster + clock util.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { + ref, err := api.GetReference(object) + if err != nil { + glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !validateEventType(eventtype) { + glog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func validateEventType(eventtype string) bool { + switch eventtype { + case api.EventTypeNormal, api.EventTypeWarning: + return true + } + return false +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { + t := unversioned.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = api.NamespaceDefault + } + return &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go b/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go new file mode 100644 index 00000000..fa76db79 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go @@ -0,0 +1,360 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/strategicpatch" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 +) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *api.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *api.Event) bool + +// DefaultEventFilterFunc returns false for all incoming events +func DefaultEventFilterFunc(event *api.Event) bool { + return false +} + +// EventAggregatorKeyFunc is responsible for grouping events for aggregation +// It returns a tuple of the following: +// aggregateKey - key the identifies the aggregate group to bucket this event +// localKey - key that makes this event in the local group +type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string) + +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason +func EventAggregatorByReasonFunc(event *api.Event) (string, string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + }, + ""), event.Message +} + +// EventAggregatorMessageFunc is responsible for producing an aggregation message +type EventAggregatorMessageFunc func(event *api.Event) string + +// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message +func EventAggregatorByReasonMessageFunc(event *api.Event) string { + return "(events with common reason combined)" +} + +// EventAggregator identifies similar events and aggregates them into a single event +type EventAggregator struct { + sync.RWMutex + + // The cache that manages aggregation state + cache *lru.Cache + + // The function that groups events for aggregation + keyFunc EventAggregatorKeyFunc + + // The function that generates a message for an aggregate event + messageFunc EventAggregatorMessageFunc + + // The maximum number of events in the specified interval before aggregation occurs + maxEvents int + + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new + maxIntervalInSeconds int + + // clock is used to allow for testing over a time interval + clock util.Clock +} + +// NewEventAggregator returns a new instance of an EventAggregator +func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, + maxEvents int, maxIntervalInSeconds int, clock util.Clock) *EventAggregator { + return &EventAggregator{ + cache: 
lru.New(lruCacheSize), + keyFunc: keyFunc, + messageFunc: messageFunc, + maxEvents: maxEvents, + maxIntervalInSeconds: maxIntervalInSeconds, + clock: clock, + } +} + +// aggregateRecord holds data used to perform aggregation decisions +type aggregateRecord struct { + // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate + // if the size of this set exceeds the max, we know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp unversioned.Time +} + +// EventAggregate identifies similar events and groups into a common event if required +func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error) { + aggregateKey, localKey := e.keyFunc(newEvent) + now := unversioned.NewTime(e.clock.Now()) + record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now} + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // if the last event was far enough in the past, it is not aggregated, and we must reset state + maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + if record.localKeys.Len() < e.maxEvents { + return newEvent, nil + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event + eventCopy := &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, nil +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count int + + // The time at which the event was first recorded. 
+ firstTimestamp unversioned.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock util.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock util.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records the event, and determines if its frequency should update +func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, error) { + var ( + patch []byte + err error + ) + key := getEventKey(newEvent) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + lastObservation := e.lastEventObservationFromCache(key) + + // we have seen this event before, so we must prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = unversioned.NewTime(time.Unix(0, 0)) + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: int(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *api.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: int(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
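The sink loop in event.go (recordToSink) is the intended consumer of the correlator documented here. The following is a simplified sketch of that flow using only exported types from this package; it is illustrative and not the vendored implementation.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
)

// correlateAndSend runs one event through the correlator and writes the
// result to the sink: filtered events are dropped, duplicates are patched,
// and new events are created. The server-returned object is fed back so the
// correlator can track names and resource versions.
func correlateAndSend(sink record.EventSink, c *record.EventCorrelator, ev *api.Event) error {
	res, err := c.EventCorrelate(ev)
	if err != nil {
		return err
	}
	if res.Skip {
		return nil // filtered out, nothing to record
	}
	var sent *api.Event
	if res.Event.Count > 1 {
		// Seen before: apply the precomputed strategic merge patch.
		sent, err = sink.Patch(res.Event, res.Patch)
	} else {
		sent, err = sink.Create(res.Event)
	}
	if err != nil {
		return err
	}
	c.UpdateState(sent)
	return nil
}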
+type EventCorrelator struct { + // the function to filter the event + filterFunc EventFilterFunc + // the object that performs event aggregation + aggregator *EventAggregator + // the object that observes events as they come through + logger *eventLogger +} + +// EventCorrelateResult is the result of a Correlate +type EventCorrelateResult struct { + // the event after correlation + Event *api.Event + // if provided, perform a strategic patch when updating the record on the server + Patch []byte + // if true, do no further processing of the event + Skip bool +} + +// NewEventCorrelator returns an EventCorrelator configured with default values. +// +// The EventCorrelator is responsible for event filtering, aggregating, and counting +// prior to interacting with the API server to record the event. +// +// The default behavior is as follows: +// * No events are filtered from being recorded +// * Aggregation is performed if a similar event is recorded 10 times in a +// in a 10 minute rolling interval. A similar event is an event that varies only by +// the Event.Message field. Rather than recording the precise event, aggregation +// will create a new event whose message reports that it has combined events with +// the same reason. +// * Events are incrementally counted if the exact same event is encountered multiple +// times. +func NewEventCorrelator(clock util.Clock) *EventCorrelator { + cacheSize := maxLruCacheEntries + return &EventCorrelator{ + filterFunc: DefaultEventFilterFunc, + aggregator: NewEventAggregator( + cacheSize, + EventAggregatorByReasonFunc, + EventAggregatorByReasonMessageFunc, + defaultAggregateMaxEvents, + defaultAggregateIntervalInSeconds, + clock), + logger: newEventLogger(cacheSize, clock), + } +} + +// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events +func (c *EventCorrelator) EventCorrelate(newEvent *api.Event) (*EventCorrelateResult, error) { + if c.filterFunc(newEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + aggregateEvent, err := c.aggregator.EventAggregate(newEvent) + if err != nil { + return &EventCorrelateResult{}, err + } + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent) + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *api.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/fake.go b/vendor/k8s.io/kubernetes/pkg/client/record/fake.go new file mode 100644 index 00000000..35204ef2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/record/fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. 
It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + } +} + +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go new file mode 100644 index 00000000..230edd45 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go @@ -0,0 +1,228 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +const ( + // Environment variables: Note that the duration should be long enough that the backoff + // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". + envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" + envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" +) + +// RESTClient imposes common Kubernetes API conventions on a set of resource paths. +// The baseURL is expected to point to an HTTP or HTTPS path that is the parent +// of one or more resources. The server should return a decodable API resource +// object, or an api.Status object which contains information about the reason for +// any failure. +// +// Most consumers should use client.New() to get a Kubernetes API client. +type RESTClient struct { + // base is the root URL for all invocations of the client + base *url.URL + // versionedAPIPath is a path segment connecting the base URL to the resource root + versionedAPIPath string + + // contentConfig is the information used to communicate with the server. + contentConfig ContentConfig + + // serializers contain all serializers for undelying content type. + serializers Serializers + + // creates BackoffManager that is passed to requests. + createBackoffMgr func() BackoffManager + + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. + Throttle flowcontrol.RateLimiter + + // Set specific behavior of the client. If not set http.DefaultClient will be used. 
+ Client *http.Client
+}
+
+type Serializers struct {
+ Encoder runtime.Encoder
+ Decoder runtime.Decoder
+ StreamingSerializer runtime.Serializer
+ Framer runtime.Framer
+ RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
+}
+
+// NewRESTClient creates a new RESTClient. This client performs generic REST functions
+// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
+// decoding of responses from the server.
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
+ base := *baseURL
+ if !strings.HasSuffix(base.Path, "/") {
+ base.Path += "/"
+ }
+ base.RawQuery = ""
+ base.Fragment = ""
+
+ if config.GroupVersion == nil {
+ config.GroupVersion = &unversioned.GroupVersion{}
+ }
+ if len(config.ContentType) == 0 {
+ config.ContentType = "application/json"
+ }
+ serializers, err := createSerializers(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var throttle flowcontrol.RateLimiter
+ if maxQPS > 0 && rateLimiter == nil {
+ throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
+ } else if rateLimiter != nil {
+ throttle = rateLimiter
+ }
+ return &RESTClient{
+ base: &base,
+ versionedAPIPath: versionedAPIPath,
+ contentConfig: config,
+ serializers: *serializers,
+ createBackoffMgr: readExpBackoffConfig,
+ Throttle: throttle,
+ Client: client,
+ }, nil
+}
+
+// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client
+func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
+ if c == nil {
+ return nil
+ }
+ return c.Throttle
+}
+
+// readExpBackoffConfig handles the internal logic of determining what the
+// backoff policy is. By default if no information is available, NoBackoff.
+// TODO Generalize this see #17727 .
+func readExpBackoffConfig() BackoffManager {
+ backoffBase := os.Getenv(envBackoffBase)
+ backoffDuration := os.Getenv(envBackoffDuration)
+
+ backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
+ backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
+ if errBase != nil || errDuration != nil {
+ return &NoBackoff{}
+ }
+ return &URLBackoff{
+ Backoff: flowcontrol.NewBackOff(
+ time.Duration(backoffBaseInt)*time.Second,
+ time.Duration(backoffDurationInt)*time.Second)}
+}
+
+// createSerializers creates all necessary serializers for given contentType.
+func createSerializers(config ContentConfig) (*Serializers, error) { + negotiated := config.NegotiatedSerializer + contentType := config.ContentType + info, ok := negotiated.SerializerForMediaType(contentType, nil) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + streamInfo, ok := negotiated.StreamingSerializerForMediaType(contentType, nil) + if !ok { + return nil, fmt.Errorf("streaming serializer for %s not registered", contentType) + } + internalGV := unversioned.GroupVersion{ + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + } + return &Serializers{ + Encoder: negotiated.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: negotiated.DecoderToVersion(info.Serializer, internalGV), + StreamingSerializer: streamInfo.Serializer, + Framer: streamInfo.Framer, + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + renegotiated, ok := negotiated.SerializerForMediaType(contentType, params) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return negotiated.DecoderToVersion(renegotiated.Serializer, internalGV), nil + }, + }, nil +} + +// Verb begins a request with a verb (GET, POST, PUT, DELETE). +// +// Example usage of RESTClient's request building interface: +// c, err := NewRESTClient(...) +// if err != nil { ... } +// resp, err := c.Verb("GET"). +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// if err != nil { ... } +// list, ok := resp.(*api.PodList) +// +func (c *RESTClient) Verb(verb string) *Request { + backoff := c.createBackoffMgr() + + if c.Client == nil { + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + } + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) +} + +// Post begins a POST request. Short for c.Verb("POST"). +func (c *RESTClient) Post() *Request { + return c.Verb("POST") +} + +// Put begins a PUT request. Short for c.Verb("PUT"). +func (c *RESTClient) Put() *Request { + return c.Verb("PUT") +} + +// Patch begins a PATCH request. Short for c.Verb("Patch"). +func (c *RESTClient) Patch(pt api.PatchType) *Request { + return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) +} + +// Get begins a GET request. Short for c.Verb("GET"). +func (c *RESTClient) Get() *Request { + return c.Verb("GET") +} + +// Delete begins a DELETE request. Short for c.Verb("DELETE"). +func (c *RESTClient) Delete() *Request { + return c.Verb("DELETE") +} + +// APIVersion returns the APIVersion this RESTClient is expected to use. +func (c *RESTClient) APIVersion() unversioned.GroupVersion { + return *c.contentConfig.GroupVersion +} + +func (c *RESTClient) Codec() runtime.Codec { + return c.contentConfig.Codec +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go new file mode 100644 index 00000000..0741e3c2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go @@ -0,0 +1,328 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + gruntime "runtime" + "strings" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/crypto" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/version" +) + +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type Config struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + // Prefix is the sub path of the server. If not specified, the client will set + // a default value. Use "/" to indicate the server root should be used + Prefix string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Impersonate is the username that this RESTClient will impersonate + Impersonate string + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig + + // Server should be accessed without verifying the TLS + // certificate. For testing only. + Insecure bool + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // Transport may be used for custom HTTP behavior. This attribute may not + // be specified with the TLS client certificate options. Use WrapTransport + // for most client level operations. + Transport http.RoundTripper + // WrapTransport will be invoked for custom HTTP behavior after the underlying + // transport is initialized (either the transport created from TLSClientConfig, + // Transport, or http.DefaultTransport). The config may layer other RoundTrippers + // on top of the returned RoundTripper. + WrapTransport func(rt http.RoundTripper) http.RoundTripper + + // QPS indicates the maximum QPS to the master from this client. If zero, QPS is unlimited. + QPS float32 + + // Maximum burst for throttle + Burst int + + // Rate limiter for limiting connections to the master from this client. 
If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter +} + +// TLSClientConfig contains settings to enable transport layer security +type TLSClientConfig struct { + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +type ContentConfig struct { + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server, and + // as the default content type on any object sent to the server. If not set, + // "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. + GroupVersion *unversioned.GroupVersion + // NegotiatedSerializer is used for obtaining encoders and decoders for multiple + // supported media types. + NegotiatedSerializer runtime.NegotiatedSerializer + + // Codec specifies the encoding and decoding behavior for runtime.Objects passed + // to a RESTClient or Client. Required when initializing a RESTClient, optional + // when initializing a Client. + // + // DEPRECATED: Please use NegotiatedSerializer instead. + // Codec is currently used only in some tests and will be removed soon. + // All production setups should use NegotiatedSerializer. + Codec runtime.Codec +} + +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. +func RESTClientFor(config *Config) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) +} + +// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows +// the config.Version to be empty. 
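A minimal sketch (not part of the vendored sources) of wiring the Config above into RESTClientFor; the host, token, and CA path are placeholders, and api.Codecs is assumed to provide the required runtime.NegotiatedSerializer:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	gv := v1.SchemeGroupVersion // RESTClientFor requires a non-nil GroupVersion
	cfg := &restclient.Config{
		Host:        "https://apiserver.example.com:6443", // placeholder
		APIPath:     "/api",
		BearerToken: "replace-with-a-real-token",
		TLSClientConfig: restclient.TLSClientConfig{
			CAFile: "/etc/kubernetes/ca.crt", // placeholder
		},
		ContentConfig: restclient.ContentConfig{
			GroupVersion:         &gv,
			NegotiatedSerializer: api.Codecs, // assumed to satisfy runtime.NegotiatedSerializer
		},
		QPS:   5.0,
		Burst: 10,
	}

	client, err := restclient.RESTClientFor(cfg)
	if err != nil {
		panic(err)
	}

	// Chained request building is added later in this patch (request.go).
	body, err := client.Get().Namespace("default").Resource("pods").DoRaw()
	fmt.Println(len(body), err)
}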
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + } + + versionConfig := config.ContentConfig + if versionConfig.GroupVersion == nil { + v := unversioned.SchemeGroupVersion + versionConfig.GroupVersion = &v + } + + return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) +} + +// SetKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +func SetKubernetesDefaults(config *Config) error { + if len(config.UserAgent) == 0 { + config.UserAgent = DefaultKubernetesUserAgent() + } + if config.QPS == 0.0 { + config.QPS = 5.0 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} + +// DefaultKubernetesUserAgent returns the default user agent that clients can use. +func DefaultKubernetesUserAgent() string { + commit := version.Get().GitCommit + if len(commit) > 7 { + commit = commit[:7] + } + if len(commit) == 0 { + commit = "unknown" + } + version := version.Get().GitVersion + seg := strings.SplitN(version, "-", 2) + version = seg[0] + return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) +} + +// InClusterConfig returns a config object which uses the service account +// kubernetes gives to pods. It's intended for clients that expect to be +// running inside a pod running on kuberenetes. It will return an error if +// called from a process not running in a kubernetes environment. +func InClusterConfig() (*Config, error) { + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + } + + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) + if err != nil { + return nil, err + } + tlsClientConfig := TLSClientConfig{} + rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey + if _, err := crypto.CertPoolFromFile(rootCAFile); err != nil { + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &Config{ + // TODO: switch to using cluster DNS. + Host: "https://" + net.JoinHostPort(host, port), + BearerToken: string(token), + TLSClientConfig: tlsClientConfig, + }, nil +} + +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. 
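For a process running inside a pod, the same construction can start from InClusterConfig; as in the previous sketch, api.Codecs is an assumed NegotiatedSerializer and this is illustrative rather than vendored code:

package main

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// Reads KUBERNETES_SERVICE_HOST/PORT plus the mounted service-account token and CA.
	cfg, err := restclient.InClusterConfig()
	if err != nil {
		panic(err) // not running inside a cluster
	}

	// InClusterConfig leaves ContentConfig empty, but RESTClientFor requires it.
	gv := v1.SchemeGroupVersion
	cfg.ContentConfig = restclient.ContentConfig{
		GroupVersion:         &gv,
		NegotiatedSerializer: api.Codecs, // assumed, as above
	}
	if err := restclient.SetKubernetesDefaults(cfg); err != nil { // fills UserAgent, QPS, Burst
		panic(err)
	}

	client, err := restclient.RESTClientFor(cfg)
	if err != nil {
		panic(err)
	}
	_ = client
}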
+func IsConfigTransportTLS(config Config) bool { + baseURL, _, err := defaultServerUrlFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func LoadTLSFiles(c *Config) error { + var err error + c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + if err != nil { + return err + } + + c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + if err != nil { + return err + } + + c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +func AddUserAgent(config *Config, userAgent string) *Config { + fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent + config.UserAgent = fullUserAgent + return config +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go new file mode 100644 index 00000000..4752e375 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +type AuthProvider interface { + // WrapTransport allows the plugin to create a modified RoundTripper that + // attaches authorization headers (or other info) to requests. + WrapTransport(http.RoundTripper) http.RoundTripper + // Login allows the plugin to initialize its configuration. It must not + // require direct user interaction. + Login() error +} + +// Factory generates an AuthProvider plugin. +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. +type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) + +// AuthProviderConfigPersister allows a plugin to persist configuration info +// for just itself. +type AuthProviderConfigPersister interface { + Persist(map[string]string) error +} + +// All registered auth provider plugins. 
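The plugin surface here is small: an AuthProvider wraps the transport, a Factory builds one from stored configuration, and registration happens through RegisterAuthProviderPlugin below, typically from an init function. A hypothetical header-injecting provider as a sketch (not part of the vendored sources):

package myauth // hypothetical consumer package

import (
	"net/http"

	"k8s.io/kubernetes/pkg/client/restclient"
)

// headerProvider injects a static bearer token; real providers would refresh
// credentials in Login and save them through the persister.
type headerProvider struct {
	token     string
	persister restclient.AuthProviderConfigPersister
}

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

func (p *headerProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
		req.Header.Set("Authorization", "Bearer "+p.token)
		return rt.RoundTrip(req)
	})
}

func (p *headerProvider) Login() error { return nil }

// newHeaderProvider matches the Factory signature, so it can be registered directly.
func newHeaderProvider(clusterAddress string, config map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
	return &headerProvider{token: config["token"], persister: persister}, nil
}

func init() {
	if err := restclient.RegisterAuthProviderPlugin("header-example", newHeaderProvider); err != nil {
		panic(err)
	}
}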
+var pluginsLock sync.Mutex +var plugins = make(map[string]Factory) + +func RegisterAuthProviderPlugin(name string, plugin Factory) error { + pluginsLock.Lock() + defer pluginsLock.Unlock() + if _, found := plugins[name]; found { + return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + } + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + plugins[name] = plugin + return nil +} + +func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { + pluginsLock.Lock() + defer pluginsLock.Unlock() + p, ok := plugins[apc.Name] + if !ok { + return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + } + return p(clusterAddress, apc.Config, persister) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go new file mode 100644 index 00000000..e406450a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go @@ -0,0 +1,1086 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/client/metrics" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/streaming" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/watch" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +var ( + // specialParams lists parameters that are handled specially and which users of Request + // are therefore not allowed to set manually. + specialParams = sets.NewString("timeout") + + // longThrottleLatency defines threshold for logging requests. All requests being + // throttle for more than longThrottleLatency will be logged. + longThrottleLatency = 50 * time.Millisecond +) + +func init() { + metrics.Register() +} + +// HTTPClient is an interface for testing a request object. +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// ResponseWrapper is an interface for getting a response. +// The response may be either accessed as a raw data (the whole output is put into memory) or as a stream. +type ResponseWrapper interface { + DoRaw() ([]byte, error) + Stream() (io.ReadCloser, error) +} + +// RequestConstructionError is returned when there's an error assembling a request. +type RequestConstructionError struct { + Err error +} + +// Error returns a textual description of 'r'. 
+func (r *RequestConstructionError) Error() string { + return fmt.Sprintf("request construction error: '%v'", r.Err) +} + +// Request allows for building up a request to a server in a chained fashion. +// Any errors are stored until the end of your call, so you only have to +// check once. +type Request struct { + // required + client HTTPClient + verb string + + baseURL *url.URL + content ContentConfig + serializers Serializers + + // generic components accessible via method setters + pathPrefix string + subpath string + params url.Values + headers http.Header + + // structural elements of the request that are part of the Kubernetes API conventions + namespace string + namespaceSet bool + resource string + resourceName string + subresource string + selector labels.Selector + timeout time.Duration + + // output + err error + body io.Reader + + // The constructed request and the response + req *http.Request + resp *http.Response + + backoffMgr BackoffManager + throttle flowcontrol.RateLimiter +} + +// NewRequest creates a new request helper object for accessing runtime.Objects on a server. +func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request { + if backoff == nil { + glog.V(2).Infof("Not implementing request backoff strategy.") + backoff = &NoBackoff{} + } + + pathPrefix := "/" + if baseURL != nil { + pathPrefix = path.Join(pathPrefix, baseURL.Path) + } + r := &Request{ + client: client, + verb: verb, + baseURL: baseURL, + pathPrefix: path.Join(pathPrefix, versionedAPIPath), + content: content, + serializers: serializers, + backoffMgr: backoff, + throttle: throttle, + } + if len(content.ContentType) > 0 { + r.SetHeader("Accept", content.ContentType+", */*") + } + return r +} + +// Prefix adds segments to the relative beginning to the request path. These +// items will be placed before the optional Namespace, Resource, or Name sections. +// Setting AbsPath will clear any previously set Prefix segments +func (r *Request) Prefix(segments ...string) *Request { + if r.err != nil { + return r + } + r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...)) + return r +} + +// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional +// Namespace, Resource, or Name sections. +func (r *Request) Suffix(segments ...string) *Request { + if r.err != nil { + return r + } + r.subpath = path.Join(r.subpath, path.Join(segments...)) + return r +} + +// Resource sets the resource to access (/[ns//]) +func (r *Request) Resource(resource string) *Request { + if r.err != nil { + return r + } + if len(r.resource) != 0 { + r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource) + return r + } + if msgs := validation.IsValidPathSegmentName(resource); len(msgs) != 0 { + r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs) + return r + } + r.resource = resource + return r +} + +// SubResource sets a sub-resource path which can be multiple segments segment after the resource +// name but before the suffix. +func (r *Request) SubResource(subresources ...string) *Request { + if r.err != nil { + return r + } + subresource := path.Join(subresources...) 
+ if len(r.subresource) != 0 { + r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.resource, subresource) + return r + } + for _, s := range subresources { + if msgs := validation.IsValidPathSegmentName(s); len(msgs) != 0 { + r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs) + return r + } + } + r.subresource = subresource + return r +} + +// Name sets the name of a resource to access (/[ns//]) +func (r *Request) Name(resourceName string) *Request { + if r.err != nil { + return r + } + if len(resourceName) == 0 { + r.err = fmt.Errorf("resource name may not be empty") + return r + } + if len(r.resourceName) != 0 { + r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName) + return r + } + if msgs := validation.IsValidPathSegmentName(resourceName); len(msgs) != 0 { + r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs) + return r + } + r.resourceName = resourceName + return r +} + +// Namespace applies the namespace scope to a request (/[ns//]) +func (r *Request) Namespace(namespace string) *Request { + if r.err != nil { + return r + } + if r.namespaceSet { + r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace) + return r + } + if msgs := validation.IsValidPathSegmentName(namespace); len(msgs) != 0 { + r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs) + return r + } + r.namespaceSet = true + r.namespace = namespace + return r +} + +// NamespaceIfScoped is a convenience function to set a namespace if scoped is true +func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request { + if scoped { + return r.Namespace(namespace) + } + return r +} + +// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved +// when a single segment is passed. +func (r *Request) AbsPath(segments ...string) *Request { + if r.err != nil { + return r + } + r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...)) + if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { + // preserve any trailing slashes for legacy behavior + r.pathPrefix += "/" + } + return r +} + +// RequestURI overwrites existing path and parameters with the value of the provided server relative +// URI. Some parameters (those in specialParameters) cannot be overwritten. +func (r *Request) RequestURI(uri string) *Request { + if r.err != nil { + return r + } + locator, err := url.Parse(uri) + if err != nil { + r.err = err + return r + } + r.pathPrefix = locator.Path + if len(locator.Query()) > 0 { + if r.params == nil { + r.params = make(url.Values) + } + for k, v := range locator.Query() { + r.params[k] = v + } + } + return r +} + +const ( + // A constant that clients can use to refer in a field selector to the object name field. + // Will be automatically emitted as the correct name for the API version. 
+ nodeUnschedulable = "spec.unschedulable" + objectNameField = "metadata.name" + podHost = "spec.nodeName" + podStatus = "status.phase" + secretType = "type" + + eventReason = "reason" + eventSource = "source" + eventType = "type" + eventInvolvedKind = "involvedObject.kind" + eventInvolvedNamespace = "involvedObject.namespace" + eventInvolvedName = "involvedObject.name" + eventInvolvedUID = "involvedObject.uid" + eventInvolvedAPIVersion = "involvedObject.apiVersion" + eventInvolvedResourceVersion = "involvedObject.resourceVersion" + eventInvolvedFieldPath = "involvedObject.fieldPath" +) + +type clientFieldNameToAPIVersionFieldName map[string]string + +func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) { + newFieldName, ok := c[field] + if !ok { + return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value) + } + return newFieldName, value, nil +} + +type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName + +func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) { + fMapping, ok := r[resourceType] + if !ok { + return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value) + } + return fMapping.filterField(field, value) +} + +type versionToResourceToFieldMapping map[unversioned.GroupVersion]resourceTypeToFieldMapping + +func (v versionToResourceToFieldMapping) filterField(groupVersion *unversioned.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { + rMapping, ok := v[*groupVersion] + if !ok { + // glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value) + return field, value, nil + } + newField, newValue, err = rMapping.filterField(resourceType, field, value) + if err != nil { + // This is only a warning until we find and fix all of the client's usages. + // glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value) + return field, value, nil + } + return newField, newValue, nil +} + +var fieldMappings = versionToResourceToFieldMapping{ + v1.SchemeGroupVersion: resourceTypeToFieldMapping{ + "nodes": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + nodeUnschedulable: nodeUnschedulable, + }, + "pods": clientFieldNameToAPIVersionFieldName{ + podHost: podHost, + podStatus: podStatus, + }, + "secrets": clientFieldNameToAPIVersionFieldName{ + secretType: secretType, + }, + "serviceAccounts": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "endpoints": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "events": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + eventReason: eventReason, + eventSource: eventSource, + eventType: eventType, + eventInvolvedKind: eventInvolvedKind, + eventInvolvedNamespace: eventInvolvedNamespace, + eventInvolvedName: eventInvolvedName, + eventInvolvedUID: eventInvolvedUID, + eventInvolvedAPIVersion: eventInvolvedAPIVersion, + eventInvolvedResourceVersion: eventInvolvedResourceVersion, + eventInvolvedFieldPath: eventInvolvedFieldPath, + }, + }, +} + +// FieldsSelectorParam adds the given selector as a query parameter with the name paramName. 
+func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = err + return r + } + return r.setParam(unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) +} + +// LabelsSelectorParam adds the given selector as a query parameter +func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + return r.setParam(unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) +} + +// UintParam creates a query parameter with the given value. +func (r *Request) UintParam(paramName string, u uint64) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, strconv.FormatUint(u, 10)) +} + +// Param creates a query parameter with the given string value. +func (r *Request) Param(paramName, s string) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, s) +} + +// VersionedParams will take the provided object, serialize it to a map[string][]string using the +// implicit RESTClient API version and the default parameter codec, and then add those as parameters +// to the request. Use this to provide versioned query parameters from client libraries. +func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { + if r.err != nil { + return r + } + params, err := codec.EncodeParameters(obj, *r.content.GroupVersion) + if err != nil { + r.err = err + return r + } + for k, v := range params { + for _, value := range v { + // TODO: Move it to setParam method, once we get rid of + // FieldSelectorParam & LabelSelectorParam methods. + if k == unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // labelSelector= param in every request. + continue + } + if k == unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()) { + if len(value) == 0 { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // fieldSelector= param in every request. + continue + } + // TODO: Filtering should be handled somewhere else. 
+ selector, err := fields.ParseSelector(value) + if err != nil { + r.err = fmt.Errorf("unparsable field selector: %v", err) + return r + } + filteredSelector, err := selector.Transform( + func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = fmt.Errorf("untransformable field selector: %v", err) + return r + } + value = filteredSelector.String() + } + + r.setParam(k, value) + } + } + return r +} + +func (r *Request) setParam(paramName, value string) *Request { + if specialParams.Has(paramName) { + r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName) + return r + } + if r.params == nil { + r.params = make(url.Values) + } + r.params[paramName] = append(r.params[paramName], value) + return r +} + +func (r *Request) SetHeader(key, value string) *Request { + if r.headers == nil { + r.headers = http.Header{} + } + r.headers.Set(key, value) + return r +} + +// Timeout makes the request use the given duration as a timeout. Sets the "timeout" +// parameter. +func (r *Request) Timeout(d time.Duration) *Request { + if r.err != nil { + return r + } + r.timeout = d + return r +} + +// Body makes the request use obj as the body. Optional. +// If obj is a string, try to read a file of that name. +// If obj is a []byte, send it directly. +// If obj is an io.Reader, use it directly. +// If obj is a runtime.Object, marshal it correctly, and set Content-Type header. +// If obj is a runtime.Object and nil, do nothing. +// Otherwise, set an error. +func (r *Request) Body(obj interface{}) *Request { + if r.err != nil { + return r + } + switch t := obj.(type) { + case string: + data, err := ioutil.ReadFile(t) + if err != nil { + r.err = err + return r + } + glog.V(8).Infof("Request Body: %s", string(data)) + r.body = bytes.NewReader(data) + case []byte: + glog.V(8).Infof("Request Body: %s", string(t)) + r.body = bytes.NewReader(t) + case io.Reader: + r.body = t + case runtime.Object: + // callers may pass typed interface pointers, therefore we must check nil with reflection + if reflect.ValueOf(t).IsNil() { + return r + } + data, err := runtime.Encode(r.serializers.Encoder, t) + if err != nil { + r.err = err + return r + } + glog.V(8).Infof("Request Body: %s", string(data)) + r.body = bytes.NewReader(data) + r.SetHeader("Content-Type", r.content.ContentType) + default: + r.err = fmt.Errorf("unknown type used for body: %+v", obj) + } + return r +} + +// URL returns the current working URL. +func (r *Request) URL() *url.URL { + p := r.pathPrefix + if r.namespaceSet && len(r.namespace) > 0 { + p = path.Join(p, "namespaces", r.namespace) + } + if len(r.resource) != 0 { + p = path.Join(p, strings.ToLower(r.resource)) + } + // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compat if nothing was changed + if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 { + p = path.Join(p, r.resourceName, r.subresource, r.subpath) + } + + finalURL := &url.URL{} + if r.baseURL != nil { + *finalURL = *r.baseURL + } + finalURL.Path = p + + query := url.Values{} + for key, values := range r.params { + for _, value := range values { + query.Add(key, value) + } + } + + // timeout is handled specially here. 
+ if r.timeout != 0 { + query.Set("timeout", r.timeout.String()) + } + finalURL.RawQuery = query.Encode() + return finalURL +} + +// finalURLTemplate is similar to URL(), but will make all specific parameter values equal +// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query +// parameters will be reset. This creates a copy of the request so as not to change the +// underyling object. This means some useful request info (like the types of field +// selectors in use) will be lost. +// TODO: preserve field selector keys +func (r Request) finalURLTemplate() string { + if len(r.resourceName) != 0 { + r.resourceName = "{name}" + } + if r.namespaceSet && len(r.namespace) != 0 { + r.namespace = "{namespace}" + } + newParams := url.Values{} + v := []string{"{value}"} + for k := range r.params { + newParams[k] = v + } + r.params = newParams + return r.URL().String() +} + +func (r *Request) tryThrottle() { + now := time.Now() + if r.throttle != nil { + r.throttle.Accept() + } + if latency := time.Since(now); latency > longThrottleLatency { + glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + } +} + +// Watch attempts to begin watching the requested location. +// Returns a watch.Interface, or an error. +func (r *Request) Watch() (watch.Interface, error) { + // We specifically don't want to rate limit watches, so we + // don't use r.throttle here. + if r.err != nil { + return nil, r.err + } + if r.serializers.Framer == nil { + return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) + } + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return nil, err + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) + } + } + if err != nil { + // The watch stream mechanism handles many common partial data errors, so closed + // connections can be retried in many cases. + if net.IsProbableEOF(err) { + return watch.NewEmptyWatch(), nil + } + return nil, err + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + if result := r.transformResponse(resp, req); result.err != nil { + return nil, result.err + } + return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + } + framer := r.serializers.Framer.NewFrameReader(resp.Body) + decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) + return watch.NewStreamWatcher(versioned.NewDecoder(decoder, r.serializers.Decoder)), nil +} + +// updateURLMetrics is a convenience function for pushing metrics. +// It also handles corner cases for incomplete/invalid request data. +func updateURLMetrics(req *Request, resp *http.Response, err error) { + url := "none" + if req.baseURL != nil { + url = req.baseURL.Host + } + + // If we have an error (i.e. apiserver down) we report that as a metric label. 
+ if err != nil { + metrics.RequestResult.WithLabelValues(err.Error(), req.verb, url).Inc() + } else { + //Metrics for failure codes + metrics.RequestResult.WithLabelValues(strconv.Itoa(resp.StatusCode), req.verb, url).Inc() + } +} + +// Stream formats and executes the request, and offers streaming of the response. +// Returns io.ReadCloser which could be used for streaming of the response, or an error +// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. +// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. +func (r *Request) Stream() (io.ReadCloser, error) { + if r.err != nil { + return nil, r.err + } + + r.tryThrottle() + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, nil) + if err != nil { + return nil, err + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + } + if err != nil { + return nil, err + } + + switch { + case (resp.StatusCode >= 200) && (resp.StatusCode < 300): + return resp.Body, nil + + default: + // ensure we close the body before returning the error + defer resp.Body.Close() + + // we have a decent shot at taking the object returned, parsing it as a status object and returning a more normal error + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%v while accessing %v", resp.Status, url) + } + + // TODO: Check ContentType. + if runtimeObject, err := runtime.Decode(r.serializers.Decoder, bodyBytes); err == nil { + statusError := errors.FromObject(runtimeObject) + + if _, ok := statusError.(errors.APIStatus); ok { + return nil, statusError + } + } + + bodyText := string(bodyBytes) + return nil, fmt.Errorf("%s while accessing %v: %s", resp.Status, url, bodyText) + } +} + +// request connects to the server and invokes the provided function when a server response is +// received. It handles retry behavior and up front validation of requests. It will invoke +// fn at most once. It will return an error if a problem occurred prior to connecting to the +// server - the provided function is responsible for handling server errors. +func (r *Request) request(fn func(*http.Request, *http.Response)) error { + //Metrics for total request latency + start := time.Now() + defer func() { + metrics.RequestLatency.WithLabelValues(r.verb, r.finalURLTemplate()).Observe(metrics.SinceInMicroseconds(start)) + }() + + if r.err != nil { + glog.V(4).Infof("Error in request: %v", r.err) + return r.err + } + + // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) + if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set during creation") + } + + client := r.client + if client == nil { + client = http.DefaultClient + } + + // Right now we make about ten retry attempts if we get a Retry-After response. 
+ // TODO: Change to a timeout based approach. + maxRetries := 10 + retries := 0 + for { + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return err + } + req.Header = r.headers + + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + if err != nil { + return err + } + + done := func() bool { + // ensure the response body is closed before we reconnect, so that we reuse the same + // TCP connection + defer resp.Body.Close() + + retries++ + if seconds, wait := checkWait(resp); wait && retries < maxRetries { + if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { + _, err := seeker.Seek(0, 0) + if err != nil { + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + fn(req, resp) + return true + } + } + + glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url) + r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + return false + } + fn(req, resp) + return true + }() + if done { + return nil + } + } +} + +// Do formats and executes the request. Returns a Result object for easy response +// processing. +// +// Error type: +// * If the request can't be constructed, or an error happened earlier while building its +// arguments: *RequestConstructionError +// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// * http.Client.Do errors are returned directly. +func (r *Request) Do() Result { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result = r.transformResponse(resp, req) + }) + if err != nil { + return Result{err: err} + } + return result +} + +// DoRaw executes the request but does not process the response body. +func (r *Request) DoRaw() ([]byte, error) { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result.body, result.err = ioutil.ReadAll(resp.Body) + }) + if err != nil { + return nil, err + } + return result.body, result.err +} + +// transformResponse converts an API response into a structured API object +func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { + var body []byte + if resp.Body != nil { + if data, err := ioutil.ReadAll(resp.Body); err == nil { + body = data + } + } + glog.V(8).Infof("Response Body: %s", string(body)) + + // Did the server give us a status response? + isStatusResponse := false + // Because release-1.1 server returns Status with empty APIVersion at paths + // to the Extensions resources, we need to use DecodeInto here to provide + // default groupVersion, otherwise a status response won't be correctly + // decoded. + status := &unversioned.Status{} + err := runtime.DecodeInto(r.serializers.Decoder, body, status) + if err == nil && len(status.Status) > 0 { + isStatusResponse = true + } + + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + if !isStatusResponse { + return Result{err: r.transformUnstructuredResponseError(resp, req, body)} + } + return Result{err: errors.FromObject(status)} + } + + // If the server gave us a status back, look at what it was. 
+ success := resp.StatusCode >= http.StatusOK && resp.StatusCode <= http.StatusPartialContent + if isStatusResponse && (status.Status != unversioned.StatusSuccess && !success) { + // "Failed" requests are clearly just an error and it makes sense to return them as such. + return Result{err: errors.FromObject(status)} + } + + contentType := resp.Header.Get("Content-Type") + var decoder runtime.Decoder + if contentType == r.content.ContentType { + decoder = r.serializers.Decoder + } else { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return Result{err: errors.NewInternalError(err)} + } + decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + if err != nil { + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + } + } + } + + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + } +} + +// transformUnstructuredResponseError handles an error from the server that is not in a structured form. +// It is expected to transform any response that is not recognizable as a clear server sent error from the +// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries +// introduce a level of uncertainty to the responses returned by servers that in common use result in +// unexpected responses. The rough structure is: +// +// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message +// +// TODO: introduce transformation of generic http.Client.Do() errors that separates 4. +func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { + if body == nil && resp.Body != nil { + if data, err := ioutil.ReadAll(resp.Body); err == nil { + body = data + } + } + glog.V(8).Infof("Response Body: %s", string(body)) + + message := "unknown" + if isTextResponse(resp) { + message = strings.TrimSpace(string(body)) + } + retryAfter, _ := retryAfterSeconds(resp) + return errors.NewGenericServerResponse( + resp.StatusCode, + req.Method, + unversioned.GroupResource{ + Group: r.content.GroupVersion.Group, + Resource: r.resource, + }, + r.resourceName, + message, + retryAfter, + true, + ) +} + +// isTextResponse returns true if the response appears to be a textual media type. +func isTextResponse(resp *http.Response) bool { + contentType := resp.Header.Get("Content-Type") + if len(contentType) == 0 { + return true + } + media, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return strings.HasPrefix(media, "text/") +} + +// checkWait returns true along with a number of seconds if the server instructed us to wait +// before retrying. 
+func checkWait(resp *http.Response) (int, bool) { + switch r := resp.StatusCode; { + // any 500 error code and 429 can trigger a wait + case r == errors.StatusTooManyRequests, r >= 500: + default: + return 0, false + } + i, ok := retryAfterSeconds(resp) + return i, ok +} + +// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if +// the header was missing or not a valid number. +func retryAfterSeconds(resp *http.Response) (int, bool) { + if h := resp.Header.Get("Retry-After"); len(h) > 0 { + if i, err := strconv.Atoi(h); err == nil { + return i, true + } + } + return 0, false +} + +// Result contains the result of calling Request.Do(). +type Result struct { + body []byte + contentType string + err error + statusCode int + + decoder runtime.Decoder +} + +// Raw returns the raw result. +func (r Result) Raw() ([]byte, error) { + return r.body, r.err +} + +// Get returns the result as an object. +func (r Result) Get() (runtime.Object, error) { + if r.err != nil { + return nil, r.err + } + if r.decoder == nil { + return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + return runtime.Decode(r.decoder, r.body) +} + +// StatusCode returns the HTTP status code of the request. (Only valid if no +// error was returned.) +func (r Result) StatusCode(statusCode *int) Result { + *statusCode = r.statusCode + return r +} + +// Into stores the result into obj, if possible. If obj is nil it is ignored. +func (r Result) Into(obj runtime.Object) error { + if r.err != nil { + return r.err + } + if r.decoder == nil { + return fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + return runtime.DecodeInto(r.decoder, r.body, obj) +} + +// WasCreated updates the provided bool pointer to whether the server returned +// 201 created or a different response. +func (r Result) WasCreated(wasCreated *bool) Result { + *wasCreated = r.statusCode == http.StatusCreated + return r +} + +// Error returns the error executing the request, nil if no error occurred. +// See the Request.Do() comment for what errors you might get. +func (r Result) Error() error { + return r.err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go new file mode 100644 index 00000000..0bfa2ea2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "crypto/tls" + "net/http" + + "k8s.io/kubernetes/pkg/client/transport" +) + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. 
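+//
+// A minimal usage sketch (assumes a *Config populated elsewhere, e.g. with CA data):
+//
+//	tlsConfig, err := TLSConfigFor(cfg)
+//	if err != nil {
+//		// handle error
+//	}
+//	// tlsConfig is nil when cfg requests no transport-level security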
+func TLSConfigFor(config *Config) (*tls.Config, error) { + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) +} + +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func TransportFor(config *Config) (http.RoundTripper, error) { + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) +} + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the +// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack +// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use +// the higher level TransportFor or RESTClientFor methods. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + cfg, err := config.transportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) +} + +// transportConfig converts a client config to an appropriate transport config. +func (c *Config) transportConfig() (*transport.Config, error) { + wt := c.WrapTransport + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + if wt != nil { + previousWT := wt + wt = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(previousWT(rt)) + } + } else { + wt = provider.WrapTransport + } + } + return &transport.Config{ + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: wt, + TLS: transport.TLSConfig{ + CAFile: c.CAFile, + CAData: c.CAData, + CertFile: c.CertFile, + CertData: c.CertData, + KeyFile: c.KeyFile, + KeyData: c.KeyData, + Insecure: c.Insecure, + }, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + Impersonate: c.Impersonate, + }, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go new file mode 100644 index 00000000..9a83d787 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "fmt" + "net/url" + "path" + + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. 
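+//
+// A minimal usage sketch (host value illustrative only):
+//
+//	hostURL, versionedAPIPath, err := DefaultServerURL(
+//		"https://k8s.example.invalid:6443", "/api",
+//		unversioned.GroupVersion{Version: "v1"}, true)
+//	if err != nil {
+//		// handle error
+//	}
+//	// versionedAPIPath is "/api/v1"; hostURL points at the given server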
+func DefaultServerURL(host, apiPath string, groupVersion unversioned.GroupVersion, defaultTLS bool) (*url.URL, string, error) { + if host == "" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil { + return nil, "", err + } + if hostURL.Scheme == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, "", err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to + // all URIs used to access the host. this is useful when there's a proxy in front of the + // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for + // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c. + // + // if running without a frontend proxy (that changes the location of the apiserver), then + // hostURL.Path should be blank. + // + // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base + versionedAPIPath := path.Join("/", apiPath) + + // Add the version to the end of the path + if len(groupVersion.Group) > 0 { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version) + + } else { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version) + + } + + return hostURL, versionedAPIPath, nil +} + +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerUrlFor(config *Config) (*url.URL, string, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." + hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 + hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + if config.GroupVersion != nil { + return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS) + } + return DefaultServerURL(host, config.APIPath, unversioned.GroupVersion{}, defaultTLS) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go new file mode 100644 index 00000000..6c672f08 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "net/url" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/flowcontrol" + "k8s.io/kubernetes/pkg/util/sets" +) + +// Set of resp. Codes that we backoff for. 
+// In general these should be errors that indicate a server is overloaded. +// These shouldn't be configured by any user, we set them based on conventions +// described in +var serverIsOverloadedSet = sets.NewInt(429) +var maxResponseCode = 499 + +type BackoffManager interface { + UpdateBackoff(actualUrl *url.URL, err error, responseCode int) + CalculateBackoff(actualUrl *url.URL) time.Duration + Sleep(d time.Duration) +} + +// URLBackoff struct implements the semantics on top of Backoff which +// we need for URL specific exponential backoff. +type URLBackoff struct { + // Uses backoff as underlying implementation. + Backoff *flowcontrol.Backoff +} + +// NoBackoff is a stub implementation, can be used for mocking or else as a default. +type NoBackoff struct { +} + +func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // do nothing. +} + +func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return 0 * time.Second +} + +func (n *NoBackoff) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Disable makes the backoff trivial, i.e., sets it to zero. This might be used +// by tests which want to run 1000s of mock requests without slowing down. +func (b *URLBackoff) Disable() { + glog.V(4).Infof("Disabling backoff strategy") + b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) +} + +// baseUrlKey returns the key which urls will be mapped to. +// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080. +func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { + // Simple implementation for now, just the host. + // We may backoff specific paths (i.e. "pods") differentially + // in the future. + host, err := url.Parse(rawurl.String()) + if err != nil { + glog.V(4).Infof("Error extracting url: %v", rawurl) + panic("bad url!") + } + return host.Host +} + +// UpdateBackoff updates backoff metadata +func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // range for retry counts that we store is [0,13] + if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) { + b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) + return + } else if responseCode >= 300 || err != nil { + glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + } + + //If we got this far, there is no backoff required for this URL anymore. + b.Backoff.Reset(b.baseUrlKey(actualUrl)) +} + +// CalculateBackoff takes a url and back's off exponentially, +// based on its knowledge of existing failures. +func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return b.Backoff.Get(b.baseUrlKey(actualUrl)) +} + +func (b *URLBackoff) Sleep(d time.Duration) { + b.Backoff.Clock.Sleep(d) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go new file mode 100644 index 00000000..e12c05c1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restclient + +import ( + "encoding/json" + "fmt" + "net/http" + "path" + + "k8s.io/kubernetes/pkg/api/unversioned" +) + +const ( + legacyAPIPath = "/api" + defaultAPIPath = "/apis" +) + +// TODO: Is this obsoleted by the discovery client? + +// ServerAPIVersions returns the GroupVersions supported by the API server. +// It creates a RESTClient based on the passed in config, but it doesn't rely +// on the Version and Codec of the config, because it uses AbsPath and +// takes the raw response. +func ServerAPIVersions(c *Config) (groupVersions []string, err error) { + transport, err := TransportFor(c) + if err != nil { + return nil, err + } + client := http.Client{Transport: transport} + + configCopy := *c + configCopy.GroupVersion = nil + configCopy.APIPath = "" + baseURL, _, err := defaultServerUrlFor(&configCopy) + if err != nil { + return nil, err + } + // Get the groupVersions exposed at /api + originalPath := baseURL.Path + baseURL.Path = path.Join(originalPath, legacyAPIPath) + resp, err := client.Get(baseURL.String()) + if err != nil { + return nil, err + } + var v unversioned.APIVersions + defer resp.Body.Close() + err = json.NewDecoder(resp.Body).Decode(&v) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + + groupVersions = append(groupVersions, v.Versions...) + // Get the groupVersions exposed at /apis + baseURL.Path = path.Join(originalPath, defaultAPIPath) + resp2, err := client.Get(baseURL.String()) + if err != nil { + return nil, err + } + var apiGroupList unversioned.APIGroupList + defer resp2.Body.Close() + err = json.NewDecoder(resp2.Body).Decode(&apiGroupList) + if err != nil { + return nil, fmt.Errorf("unexpected error: %v", err) + } + + for _, g := range apiGroupList.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + + return groupVersions, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go b/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go new file mode 100644 index 00000000..8c07f539 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go @@ -0,0 +1,88 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net" + "net/http" + "sync" + "time" + + utilnet "k8s.io/kubernetes/pkg/util/net" +) + +// TlsTransportCache caches TLS http.RoundTrippers different configurations. The +// same RoundTripper will be returned for configs with identical TLS options If +// the config has no custom TLS options, http.DefaultTransport is returned. 
+type tlsTransportCache struct { + mu sync.Mutex + transports map[string]*http.Transport +} + +const idleConnsPerHost = 25 + +var tlsCache = &tlsTransportCache{transports: make(map[string]*http.Transport)} + +func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { + key, err := tlsConfigKey(config) + if err != nil { + return nil, err + } + + // Ensure we only create a single transport for the given TLS options + c.mu.Lock() + defer c.mu.Unlock() + + // See if we already have a custom transport for this config + if t, ok := c.transports[key]; ok { + return t, nil + } + + // Get the TLS options for this client config + tlsConfig, err := TLSConfigFor(config) + if err != nil { + return nil, err + } + // The options didn't require a custom TLS config + if tlsConfig == nil { + return http.DefaultTransport, nil + } + + // Cache a single transport for these options + c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + MaxIdleConnsPerHost: idleConnsPerHost, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }) + return c.transports[key], nil +} + +// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor +func tlsConfigKey(c *Config) (string, error) { + // Make sure ca/key/cert content is loaded + if err := loadTLSFiles(c); err != nil { + return "", err + } + // Only include the things that actually affect the tls.Config + return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/config.go b/vendor/k8s.io/kubernetes/pkg/client/transport/config.go new file mode 100644 index 00000000..63a63fbb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/transport/config.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import "net/http" + +// Config holds various options for establishing a transport. +type Config struct { + // UserAgent is an optional field that specifies the caller of this + // request. + UserAgent string + + // The base TLS configuration for this transport. + TLS TLSConfig + + // Username and password for basic authentication + Username string + Password string + + // Bearer token for authentication + BearerToken string + + // Impersonate is the username that this Config will impersonate + Impersonate string + + // Transport may be used for custom HTTP behavior. This attribute may + // not be specified with the TLS client certificate options. Use + // WrapTransport for most client level operations. + Transport http.RoundTripper + + // WrapTransport will be invoked for custom HTTP behavior after the + // underlying transport is initialized (either the transport created + // from TLSClientConfig, Transport, or http.DefaultTransport). 
The + // config may layer other RoundTrippers on top of the returned + // RoundTripper. + WrapTransport func(rt http.RoundTripper) http.RoundTripper +} + +// HasCA returns whether the configuration has a certificate authority or not. +func (c *Config) HasCA() bool { + return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 +} + +// HasBasicAuth returns whether the configuration has basic authentication or not. +func (c *Config) HasBasicAuth() bool { + return len(c.Username) != 0 +} + +// HasTokenAuth returns whether the configuration has token authentication or not. +func (c *Config) HasTokenAuth() bool { + return len(c.BearerToken) != 0 +} + +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *Config) HasCertAuth() bool { + return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 +} + +// TLSConfig holds the information needed to set up a TLS transport. +type TLSConfig struct { + CAFile string // Path of the PEM-encoded server trusted root certificates. + CertFile string // Path of the PEM-encoded client certificate. + KeyFile string // Path of the PEM-encoded client key. + + Insecure bool // Server should be accessed without verifying the certificate. For testing only. + + CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. + CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. + KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go b/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go new file mode 100644 index 00000000..55284ebc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go @@ -0,0 +1,337 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net/http" + "time" + + "github.com/golang/glog" +) + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered +// behavior from the config. Exposed to allow more clients that need HTTP-like +// behavior but then must hijack the underlying connection (like WebSocket or +// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from +// New. 
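+//
+// A minimal usage sketch (token value illustrative only):
+//
+//	cfg := &Config{
+//		UserAgent:   "example-client/0.1",
+//		BearerToken: "REDACTED",
+//	}
+//	rt, err := HTTPWrappersForConfig(cfg, http.DefaultTransport)
+//	if err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: rt}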
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + if config.WrapTransport != nil { + rt = config.WrapTransport(rt) + } + + rt = DebugWrappers(rt) + + // Set authentication wrappers + switch { + case config.HasBasicAuth() && config.HasTokenAuth(): + return nil, fmt.Errorf("username/password or bearer token may be set, but not both") + case config.HasTokenAuth(): + rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + case config.HasBasicAuth(): + rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) + } + if len(config.UserAgent) > 0 { + rt = NewUserAgentRoundTripper(config.UserAgent, rt) + } + if len(config.Impersonate) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } + return rt, nil +} + +// DebugWrappers wraps a round tripper and logs based on the current log level. +func DebugWrappers(rt http.RoundTripper) http.RoundTripper { + switch { + case bool(glog.V(9)): + rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) + case bool(glog.V(8)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) + case bool(glog.V(7)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) + case bool(glog.V(6)): + rt = newDebuggingRoundTripper(rt, debugURLTiming) + } + + return rt +} + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +type userAgentRoundTripper struct { + agent string + rt http.RoundTripper +} + +func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{agent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("User-Agent")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type basicAuthRoundTripper struct { + username string + password string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a +// request unless it has already been set. +func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.SetBasicAuth(rt.username, rt.password) + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type impersonatingRoundTripper struct { + impersonate string + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. 
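+// Per the implementation below, the header actually set is "Impersonate-User".
+// A minimal usage sketch (username illustrative only):
+//
+//	rt := NewImpersonatingRoundTripper("jane", http.DefaultTransport)
+//	client := &http.Client{Transport: rt}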
+func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Impersonate-User")) != 0 { + return rt.delegate.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set("Impersonate-User", rt.impersonate) + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + +type bearerAuthRoundTripper struct { + bearer string + rt http.RoundTripper +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { + return &bearerAuthRoundTripper{bearer, rt} +} + +func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + return rt.rt.RoundTrip(req) +} + +func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +// requestInfo keeps track of information about a request/response combination +type requestInfo struct { + RequestHeaders http.Header + RequestVerb string + RequestURL string + + ResponseStatus string + ResponseHeaders http.Header + ResponseErr error + + Duration time.Duration +} + +// newRequestInfo creates a new RequestInfo based on an http request +func newRequestInfo(req *http.Request) *requestInfo { + return &requestInfo{ + RequestURL: req.URL.String(), + RequestVerb: req.Method, + RequestHeaders: req.Header, + } +} + +// complete adds information about the response to the requestInfo +func (r *requestInfo) complete(response *http.Response, err error) { + if err != nil { + r.ResponseErr = err + return + } + r.ResponseStatus = response.Status + r.ResponseHeaders = response.Header +} + +// toCurl returns a string that can be run as a command in a terminal (minus the body) +func (r *requestInfo) toCurl() string { + headers := "" + for key, values := range r.RequestHeaders { + for _, value := range values { + headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) + } + } + + return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) +} + +// debuggingRoundTripper will display information about the requests passing +// through it based on what is configured +type debuggingRoundTripper struct { + delegatedRoundTripper http.RoundTripper + + levels map[debugLevel]bool +} + +type debugLevel int + +const ( + debugJustURL debugLevel = iota + debugURLTiming + debugCurlCommand + debugRequestHeaders + debugResponseStatus + debugResponseHeaders +) + +func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { + drt := &debuggingRoundTripper{ + delegatedRoundTripper: rt, + levels: make(map[debugLevel]bool, len(levels)), + } + for _, v := range levels { + drt.levels[v] = true + } + return drt +} + +func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + reqInfo := newRequestInfo(req) + + if rt.levels[debugJustURL] { + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + } + if rt.levels[debugCurlCommand] { + glog.Infof("%s", reqInfo.toCurl()) + + } + if rt.levels[debugRequestHeaders] { + glog.Infof("Request Headers:") + for key, values := range reqInfo.RequestHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + startTime := time.Now() + response, err := rt.delegatedRoundTripper.RoundTrip(req) + reqInfo.Duration = time.Since(startTime) + + reqInfo.complete(response, err) + + if rt.levels[debugURLTiming] { + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseStatus] { + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseHeaders] { + glog.Infof("Response Headers:") + for key, values := range 
reqInfo.ResponseHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + return response, err +} + +func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.delegatedRoundTripper +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go b/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go new file mode 100644 index 00000000..2d20e1b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go @@ -0,0 +1,140 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" +) + +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func New(config *Config) (http.RoundTripper, error) { + // Set transport level security + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") + } + + var ( + rt http.RoundTripper + err error + ) + + if config.Transport != nil { + rt = config.Transport + } else { + rt, err = tlsCache.get(config) + if err != nil { + return nil, err + } + } + + return HTTPWrappersForConfig(config, rt) +} + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(c *Config) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { + return nil, nil + } + if c.HasCA() && c.TLS.Insecure { + return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: c.TLS.Insecure, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. 
+func loadTLSFiles(c *Config) error { + var err error + c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) + if err != nil { + return err + } + + c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) + if err != nil { + return err + } + + c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go new file mode 100644 index 00000000..635ca4a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go @@ -0,0 +1,317 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/version" +) + +// DiscoveryInterface holds the methods that discover server-supported API groups, +// versions and resources. +type DiscoveryInterface interface { + ServerGroupsInterface + ServerResourcesInterface + ServerVersionInterface + SwaggerSchemaInterface +} + +// ServerGroupsInterface has methods for obtaining supported groups on the API server +type ServerGroupsInterface interface { + // ServerGroups returns the supported groups, with information like supported versions and the + // preferred version. 
+ ServerGroups() (*unversioned.APIGroupList, error) +} + +// ServerResourcesInterface has methods for obtaining supported resources on the API server +type ServerResourcesInterface interface { + // ServerResourcesForGroupVersion returns the supported resources for a group and version. + ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) + // ServerResources returns the supported resources for all groups and versions. + ServerResources() (map[string]*unversioned.APIResourceList, error) + // ServerPreferredResources returns the supported resources with the version preferred by the + // server. + ServerPreferredResources() ([]unversioned.GroupVersionResource, error) + // ServerPreferredNamespacedResources returns the supported namespaced resources with the + // version preferred by the server. + ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) +} + +// ServerVersionInterface has a method for retrieving the server's version. +type ServerVersionInterface interface { + // ServerVersion retrieves and parses the server's version (git version). + ServerVersion() (*version.Info, error) +} + +// SwaggerSchemaInterface has a method to retrieve the swagger schema. +type SwaggerSchemaInterface interface { + // SwaggerSchema retrieves and parses the swagger API schema the server supports. + SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) +} + +// DiscoveryClient implements the functions that discover server-supported API groups, +// versions and resources. +type DiscoveryClient struct { + *restclient.RESTClient + + LegacyPrefix string +} + +// Convert unversioned.APIVersions to unversioned.APIGroup. APIVersions is used by legacy v1, so +// group would be "". +func apiVersionsToAPIGroup(apiVersions *unversioned.APIVersions) (apiGroup unversioned.APIGroup) { + groupVersions := []unversioned.GroupVersionForDiscovery{} + for _, version := range apiVersions.Versions { + groupVersion := unversioned.GroupVersionForDiscovery{ + GroupVersion: version, + Version: version, + } + groupVersions = append(groupVersions, groupVersion) + } + apiGroup.Versions = groupVersions + // There should be only one groupVersion returned at /api + apiGroup.PreferredVersion = groupVersions[0] + return +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. 
+func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) { + // Get the groupVersions exposed at /api + v := &unversioned.APIVersions{} + err = d.Get().AbsPath(d.LegacyPrefix).Do().Into(v) + apiGroup := unversioned.APIGroup{} + if err == nil { + apiGroup = apiVersionsToAPIGroup(v) + } + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + + // Get the groupVersions exposed at /apis + apiGroupList = &unversioned.APIGroupList{} + err = d.Get().AbsPath("/apis").Do().Into(apiGroupList) + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api + if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + apiGroupList = &unversioned.APIGroupList{} + } + + // append the group retrieved from /api to the list + apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) + return apiGroupList, nil +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) { + url := url.URL{} + if len(groupVersion) == 0 { + return nil, fmt.Errorf("groupVersion shouldn't be empty") + } + if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { + url.Path = d.LegacyPrefix + "/" + groupVersion + } else { + url.Path = "/apis/" + groupVersion + } + resources = &unversioned.APIResourceList{} + err = d.Get().AbsPath(url.String()).Do().Into(resources) + if err != nil { + // ignore 403 or 404 error to be compatible with an v1.0 server. + if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + return resources, nil + } else { + return nil, err + } + } + return resources, nil +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err + } + groupVersions := unversioned.ExtractGroupVersions(apiGroups) + result := map[string]*unversioned.APIResourceList{} + for _, groupVersion := range groupVersions { + resources, err := d.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return nil, err + } + result[groupVersion] = resources + } + return result, nil +} + +// serverPreferredResources returns the supported resources with the version preferred by the +// server. If namespaced is true, only namespaced resources will be returned. +func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) { + results := []unversioned.GroupVersionResource{} + serverGroupList, err := d.ServerGroups() + if err != nil { + return results, err + } + + allErrs := []error{} + for _, apiGroup := range serverGroupList.Groups { + preferredVersion := apiGroup.PreferredVersion + apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion) + if err != nil { + allErrs = append(allErrs, err) + continue + } + groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} + for _, apiResource := range apiResourceList.APIResources { + // ignore the root scoped resources if "namespaced" is true. 
+ if namespaced && !apiResource.Namespaced { + continue + } + if strings.Contains(apiResource.Name, "/") { + continue + } + results = append(results, groupVersion.WithResource(apiResource.Name)) + } + } + return results, utilerrors.NewAggregate(allErrs) +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *DiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { + return d.serverPreferredResources(false) +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { + return d.serverPreferredResources(true) +} + +// ServerVersion retrieves and parses the server's version (git version). +func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { + body, err := d.Get().AbsPath("/version").Do().Raw() + if err != nil { + return nil, err + } + var info version.Info + err = json.Unmarshal(body, &info) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &info, nil +} + +// SwaggerSchema retrieves and parses the swagger API schema the server supports. +func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { + if version.IsEmpty() { + return nil, fmt.Errorf("groupVersion cannot be empty") + } + + groupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + groupVersions := unversioned.ExtractGroupVersions(groupList) + // This check also takes care the case that kubectl is newer than the running endpoint + if stringDoesntExistIn(version.String(), groupVersions) { + return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) + } + var path string + if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion { + path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version + } else { + path = "/swaggerapi/apis/" + version.Group + "/" + version.Version + } + + body, err := d.Get().AbsPath(path).Do().Raw() + if err != nil { + return nil, err + } + var schema swagger.ApiDeclaration + err = json.Unmarshal(body, &schema) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &schema, nil +} + +func setDiscoveryDefaults(config *restclient.Config) error { + config.APIPath = "" + config.GroupVersion = nil + codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} + config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper( + runtime.SerializerInfo{Serializer: codec}, + runtime.StreamSerializerInfo{}, + ) + if len(config.UserAgent) == 0 { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + return nil +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientFor(&config) + return &DiscoveryClient{RESTClient: client, LegacyPrefix: "/api"}, err +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If +// there is an error, it panics. 
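+//
+// A minimal usage sketch (restclient.Config construction elided):
+//
+//	client := NewDiscoveryClientForConfigOrDie(config)
+//	groups, err := client.ServerGroups()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = groups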
+func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { + client, err := NewDiscoveryClientForConfig(c) + if err != nil { + panic(err) + } + return client + +} + +// New creates a new DiscoveryClient for the given RESTClient. +func NewDiscoveryClient(c *restclient.RESTClient) *DiscoveryClient { + return &DiscoveryClient{RESTClient: c, LegacyPrefix: "/api"} +} + +func stringDoesntExistIn(str string, slice []string) bool { + for _, s := range slice { + if s == str { + return false + } + } + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go new file mode 100644 index 00000000..7e6dee0c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalclientset + +import ( + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" + unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/client/unversioned" +) + +// FromUnversionedClient adapts a unversioned.Client to a internalclientset.Clientset. +// This function is temporary. We will remove it when everyone has moved to using +// Clientset. New code should NOT use this function. 
+func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset { + var clientset internalclientset.Clientset + if c != nil { + clientset.CoreClient = unversionedcore.New(c.RESTClient) + } else { + clientset.CoreClient = unversionedcore.New(nil) + } + if c != nil && c.ExtensionsClient != nil { + clientset.ExtensionsClient = unversionedextensions.New(c.ExtensionsClient.RESTClient) + } else { + clientset.ExtensionsClient = unversionedextensions.New(nil) + } + if c != nil && c.BatchClient != nil { + clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient) + } else { + clientset.BatchClient = unversionedbatch.New(nil) + } + if c != nil && c.AutoscalingClient != nil { + clientset.AutoscalingClient = unversionedautoscaling.New(c.AutoscalingClient.RESTClient) + } else { + clientset.AutoscalingClient = unversionedautoscaling.New(nil) + } + if c != nil && c.DiscoveryClient != nil { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient) + } else { + clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil) + } + + return &clientset +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go new file mode 100644 index 00000000..1905c29c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type AppsInterface interface { + PetSetNamespacer +} + +// AppsClient is used to interact with Kubernetes batch features. +type AppsClient struct { + *restclient.RESTClient +} + +func (c *AppsClient) PetSets(namespace string) PetSetInterface { + return newPetSet(c, namespace) +} + +func NewApps(c *restclient.Config) (*AppsClient, error) { + config := *c + if err := setAppsDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsClient{client}, nil +} + +func NewAppsOrDie(c *restclient.Config) *AppsClient { + client, err := NewApps(c) + if err != nil { + panic(err) + } + return client +} + +func setAppsDefaults(config *restclient.Config) error { + g, err := registered.Group(apps.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. 
+ //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go new file mode 100644 index 00000000..64b3ef6b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go @@ -0,0 +1,125 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package auth defines a file format for holding authentication +information needed by clients of Kubernetes. Typically, +a Kubernetes cluster will put auth info for the admin in a known +location when it is created, and will (soon) put it in a known +location within a Container's file tree for Containers that +need access to the Kubernetes API. + +Having a defined format allows: + - clients to be implmented in multiple languages + - applications which link clients to be portable across + clusters with different authentication styles (e.g. + some may use SSL Client certs, others may not, etc) + - when the format changes, applications only + need to update this code. + +The file format is json, marshalled from a struct authcfg.Info. + +Clinet libraries in other languages should use the same format. + +It is not intended to store general preferences, such as default +namespace, output options, etc. CLIs (such as kubectl) and UIs should +develop their own format and may wish to inline the authcfg.Info type. + +The authcfg.Info is just a file format. It is distinct from +client.Config which holds options for creating a client.Client. +Helper functions are provided in this package to fill in a +client.Client from an authcfg.Info. + +Example: + + import ( + "pkg/client" + "pkg/client/auth" + ) + + info, err := auth.LoadFromFile(filename) + if err != nil { + // handle error + } + clientConfig = client.Config{} + clientConfig.Host = "example.com:4901" + clientConfig = info.MergeWithConfig() + client := client.New(clientConfig) + client.Pods(ns).List() +*/ +package auth + +// TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. +import ( + "encoding/json" + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/client/restclient" +) + +// Info holds Kubernetes API authorization config. It is intended +// to be read/written from a file as a JSON object. +type Info struct { + User string + Password string + CAFile string + CertFile string + KeyFile string + BearerToken string + Insecure *bool +} + +// LoadFromFile parses an Info object from a file path. 
+// If the file does not exist, then os.IsNotExist(err) == true +func LoadFromFile(path string) (*Info, error) { + var info Info + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, err + } + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &info) + if err != nil { + return nil, err + } + return &info, err +} + +// MergeWithConfig returns a copy of a client.Config with values from the Info. +// The fields of client.Config with a corresponding field in the Info are set +// with the value from the Info. +func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { + var config restclient.Config = c + config.Username = info.User + config.Password = info.Password + config.CAFile = info.CAFile + config.CertFile = info.CertFile + config.KeyFile = info.KeyFile + config.BearerToken = info.BearerToken + if info.Insecure != nil { + config.Insecure = *info.Insecure + } + return config, nil +} + +func (info Info) Complete() bool { + return len(info.User) > 0 || + len(info.CertFile) > 0 || + len(info.BearerToken) > 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go new file mode 100644 index 00000000..9e543c9d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go @@ -0,0 +1,84 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type AutoscalingInterface interface { + HorizontalPodAutoscalersNamespacer +} + +// AutoscalingClient is used to interact with Kubernetes autoscaling features. +type AutoscalingClient struct { + *restclient.RESTClient +} + +func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +func NewAutoscaling(c *restclient.Config) (*AutoscalingClient, error) { + config := *c + if err := setAutoscalingDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingClient{client}, nil +} + +func NewAutoscalingOrDie(c *restclient.Config) *AutoscalingClient { + client, err := NewAutoscaling(c) + if err != nil { + panic(err) + } + return client +} + +func setAutoscalingDefaults(config *restclient.Config) error { + // if autoscaling group is not registered, return an error + g, err := registered.Group(autoscaling.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. 
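// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. Written as if it were inside this auth package, it shows the
// flow described in the package comment above: load an Info file, check that
// it carries usable credentials, and merge it into a restclient.Config. The
// file path is a placeholder.
func exampleMergeAuth(base restclient.Config) (restclient.Config, error) {
	info, err := LoadFromFile("/var/run/kubernetes/.kubernetes_auth") // placeholder path
	if err != nil {
		// os.IsNotExist(err) is true when the file is simply absent.
		return base, err
	}
	if !info.Complete() {
		// No user, client cert, or bearer token: nothing useful to merge.
		return base, nil
	}
	// MergeWithConfig copies base and fills in Username, Password,
	// CA/cert/key paths, BearerToken and Insecure from the Info.
	return info.MergeWithConfig(base)
}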
+ //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go new file mode 100644 index 00000000..40fc49dc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go @@ -0,0 +1,114 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type BatchInterface interface { + JobsNamespacer + ScheduledJobsNamespacer +} + +// BatchClient is used to interact with Kubernetes batch features. +type BatchClient struct { + *restclient.RESTClient +} + +func (c *BatchClient) Jobs(namespace string) JobInterface { + return newJobsV1(c, namespace) +} + +func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { + return newScheduledJobs(c, namespace) +} + +func NewBatch(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setBatchDefaults(&config, nil); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +func NewBatchV2Alpha1(c *restclient.Config) (*BatchClient, error) { + config := *c + if err := setBatchDefaults(&config, &v2alpha1.SchemeGroupVersion); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchClient{client}, nil +} + +func NewBatchOrDie(c *restclient.Config) *BatchClient { + var ( + client *BatchClient + err error + ) + if c.ContentConfig.GroupVersion != nil && *c.ContentConfig.GroupVersion == v2alpha1.SchemeGroupVersion { + client, err = NewBatchV2Alpha1(c) + } else { + client, err = NewBatch(c) + } + if err != nil { + panic(err) + } + return client +} + +func setBatchDefaults(config *restclient.Config, gv *unversioned.GroupVersion) error { + // if batch group is not registered, return an error + g, err := registered.Group(batch.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config.
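// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. It mirrors the branch in NewBatchOrDie above: pick the
// v2alpha1 batch client only when the config explicitly asks for that group
// version, otherwise fall back to the default batch group. The namespace is a
// placeholder.
func exampleBatchClient(cfg *restclient.Config) (*BatchClient, error) {
	var (
		client *BatchClient
		err    error
	)
	if cfg.ContentConfig.GroupVersion != nil && *cfg.ContentConfig.GroupVersion == v2alpha1.SchemeGroupVersion {
		client, err = NewBatchV2Alpha1(cfg) // enables ScheduledJobs alongside Jobs
	} else {
		client, err = NewBatch(cfg)
	}
	if err != nil {
		return nil, err
	}
	_ = client.Jobs("default") // placeholder namespace
	return client, nil
}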
+ //if config.Version == "" { + copyGroupVersion := g.GroupVersion + if gv != nil { + copyGroupVersion = *gv + } + config.GroupVersion = &copyGroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go new file mode 100644 index 00000000..183bb5f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go @@ -0,0 +1,179 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "net" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/typed/discovery" +) + +// Interface holds the methods for clients of Kubernetes, +// an interface to allow mock testing. +type Interface interface { + PodsNamespacer + PodTemplatesNamespacer + ReplicationControllersNamespacer + ServicesNamespacer + EndpointsNamespacer + NodesInterface + EventNamespacer + LimitRangesNamespacer + ResourceQuotasNamespacer + ServiceAccountsNamespacer + SecretsNamespacer + NamespacesInterface + PersistentVolumesInterface + PersistentVolumeClaimsNamespacer + ComponentStatusesInterface + ConfigMapsNamespacer + Autoscaling() AutoscalingInterface + Batch() BatchInterface + Extensions() ExtensionsInterface + Rbac() RbacInterface + Discovery() discovery.DiscoveryInterface + + SecurityContextConstraintsInterface +} + +func (c *Client) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *Client) SecurityContextConstraints() SecurityContextConstraintInterface { + return newSecurityContextConstraints(c) +} + +func (c *Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *Client) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} +func (c *Client) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *Client) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *Client) ServiceAccounts(namespace string) ServiceAccountsInterface { + return newServiceAccounts(c, namespace) +} + +func (c *Client) Secrets(namespace string) SecretsInterface { + return newSecrets(c, namespace) +} + +func (c *Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func
(c *Client) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *Client) ConfigMaps(namespace string) ConfigMapsInterface { + return newConfigMaps(c, namespace) +} + +// Client is the implementation of a Kubernetes client. +type Client struct { + *restclient.RESTClient + *AutoscalingClient + *BatchClient + *ExtensionsClient + *AppsClient + *PolicyClient + *RbacClient + *discovery.DiscoveryClient +} + +// IsTimeout tests if this is a timeout error in the underlying transport. +// This is unbelievably ugly. +// See: http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error for details +func IsTimeout(err error) bool { + if err == nil { + return false + } + switch err := err.(type) { + case *url.Error: + if err, ok := err.Err.(net.Error); ok { + return err.Timeout() + } + case net.Error: + return err.Timeout() + } + + if strings.Contains(err.Error(), "use of closed network connection") { + return true + } + return false +} + +func (c *Client) Autoscaling() AutoscalingInterface { + return c.AutoscalingClient +} + +func (c *Client) Batch() BatchInterface { + return c.BatchClient +} + +func (c *Client) Extensions() ExtensionsInterface { + return c.ExtensionsClient +} + +func (c *Client) Apps() AppsInterface { + return c.AppsClient +} + +func (c *Client) Rbac() RbacInterface { + return c.RbacClient +} + +func (c *Client) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go new file mode 100644 index 00000000..87330c50 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go @@ -0,0 +1,183 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +func init() { + sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") + redactedBytes = []byte(string(sDec)) +} + +// IsConfigEmpty returns true if the config is empty. 
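// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. It shows how the composite Client defined above fans out to
// its embedded group clients: core resources hang directly off *Client, while
// other API groups are reached through the accessor methods. The namespace is
// a placeholder.
func exampleUseClient(c *Client) {
	_ = c.Pods("default")                                    // core group, directly on Client
	_ = c.Batch().Jobs("default")                            // batch group via the embedded BatchClient
	_ = c.Autoscaling().HorizontalPodAutoscalers("default")  // autoscaling group
	_ = c.Discovery()                                        // API discovery via the embedded DiscoveryClient
}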
+func IsConfigEmpty(config *Config) bool { + return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && + len(config.CurrentContext) == 0 && + len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && + len(config.Extensions) == 0 +} + +// MinifyConfig read the current context and uses that to keep only the relevant pieces of config +// This is useful for making secrets based on kubeconfig files +func MinifyConfig(config *Config) error { + if len(config.CurrentContext) == 0 { + return errors.New("current-context must exist in order to minify") + } + + currContext, exists := config.Contexts[config.CurrentContext] + if !exists { + return fmt.Errorf("cannot locate context %v", config.CurrentContext) + } + + newContexts := map[string]*Context{} + newContexts[config.CurrentContext] = currContext + + newClusters := map[string]*Cluster{} + if len(currContext.Cluster) > 0 { + if _, exists := config.Clusters[currContext.Cluster]; !exists { + return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) + } + + newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] + } + + newAuthInfos := map[string]*AuthInfo{} + if len(currContext.AuthInfo) > 0 { + if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { + return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) + } + + newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] + } + + config.AuthInfos = newAuthInfos + config.Clusters = newClusters + config.Contexts = newContexts + + return nil +} + +var redactedBytes []byte + +// Flatten redacts raw data entries from the config object for a human-readable view. +func ShortenConfig(config *Config) { + // trick json encoder into printing a human readable string in the raw data + // by base64 decoding what we want to print. 
Relies on implementation of + // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte + for key, authInfo := range config.AuthInfos { + if len(authInfo.ClientKeyData) > 0 { + authInfo.ClientKeyData = redactedBytes + } + if len(authInfo.ClientCertificateData) > 0 { + authInfo.ClientCertificateData = redactedBytes + } + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + if len(cluster.CertificateAuthorityData) > 0 { + cluster.CertificateAuthorityData = redactedBytes + } + config.Clusters[key] = cluster + } +} + +// Flatten changes the config object into a self contained config (useful for making secrets) +func FlattenConfig(config *Config) error { + for key, authInfo := range config.AuthInfos { + baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { + return err + } + if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { + return err + } + + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { + return err + } + + config.Clusters[key] = cluster + } + + return nil +} + +func FlattenContent(path *string, contents *[]byte, baseDir string) error { + if len(*path) != 0 { + if len(*contents) > 0 { + return errors.New("cannot have values for both path and contents") + } + + var err error + absPath := ResolvePath(*path, baseDir) + *contents, err = ioutil.ReadFile(absPath) + if err != nil { + return err + } + + *path = "" + } + + return nil +} + +// ResolvePath returns the path as an absolute paths, relative to the given base directory +func ResolvePath(path string, base string) string { + // Don't resolve empty paths + if len(path) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(path) { + return filepath.Join(base, path) + } + } + + return path +} + +func MakeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 { + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go new file mode 100644 index 00000000..f3083a84 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go @@ -0,0 +1,54 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
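// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. Written as if it were inside this api package, it chains the
// three helpers above in the order a "kubeconfig to secret" flow would use
// them: minify to the current context, flatten file references into inline
// data, then shorten (redact) the sensitive byte fields for display.
func examplePrepareForExport(config *Config) error {
	if err := MinifyConfig(config); err != nil { // drop everything but the current context
		return err
	}
	if err := FlattenConfig(config); err != nil { // inline cert/key/CA file contents
		return err
	}
	ShortenConfig(config) // replace inlined secret bytes with the redacted marker
	return nil
}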
+*/ + +package latest + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + _ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/json" + "k8s.io/kubernetes/pkg/runtime/serializer/versioning" +) + +// Version is the string that represents the current external default version. +const Version = "v1" + +var ExternalVersion = unversioned.GroupVersion{Group: "", Version: "v1"} + +// OldestVersion is the string that represents the oldest server version supported, +// for client code that wants to hardcode the lowest common denominator. +const OldestVersion = "v1" + +// Versions is the list of versions that are recognized in code. The order provided +// may be assumed to be least feature rich to most feature rich, and clients may +// choose to prefer the latter items in the list over the former items when presented +// with a set of versions to choose. +var Versions = []string{"v1"} + +var Codec runtime.Codec + +func init() { + yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme) + Codec = versioning.NewCodecForScheme( + api.Scheme, + yamlSerializer, + yamlSerializer, + unversioned.GroupVersion{Version: Version}, + runtime.InternalGroupVersioner, + ) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go new file mode 100644 index 00000000..f26a6cd1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. 
+var Scheme = runtime.NewScheme() + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +func init() { + Scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) +} + +func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go new file mode 100644 index 00000000..56b44e8f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go @@ -0,0 +1,152 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters map[string]*Cluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos map[string]*AuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts map[string]*Context `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Preferences struct { + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + Token string `json:"token,omitempty"` + // Impersonate is the username to act-as. + Impersonate string `json:"act-as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config,omitempty"` +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewConfig() *Config { + return &Config{ + Preferences: *NewPreferences(), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]runtime.Object), + } +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewContext() *Context { + return &Context{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewCluster() *Cluster { + return &Cluster{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewAuthInfo() *AuthInfo { + return &AuthInfo{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewPreferences() *Preferences { + return &Preferences{Extensions: make(map[string]runtime.Object)} +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go new file mode 100644 index 00000000..e03fc60b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go @@ -0,0 +1,231 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
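// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. Written as if it were inside this api package, it builds a
// minimal in-memory kubeconfig using the convenience constructors above; the
// server address, token and nicknames are placeholders.
func exampleNewKubeconfig() *Config {
	cfg := NewConfig()

	cluster := NewCluster()
	cluster.Server = "https://localhost:6443" // placeholder API server

	user := NewAuthInfo()
	user.Token = "placeholder-bearer-token" // placeholder credential

	ctx := NewContext()
	ctx.Cluster = "local"
	ctx.AuthInfo = "admin"
	ctx.Namespace = "default"

	cfg.Clusters["local"] = cluster
	cfg.AuthInfos["admin"] = user
	cfg.Contexts["local-admin"] = ctx
	cfg.CurrentContext = "local-admin"
	return cfg
}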
+*/ + +package v1 + +import ( + "sort" + + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" +) + +func init() { + err := api.Scheme.AddConversionFuncs( + func(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Context, out *api.Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Context, out *Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + + func(in *Config, out *api.Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make(map[string]*api.Cluster) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make(map[string]*api.AuthInfo) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make(map[string]*api.Context) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make(map[string]runtime.Object) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *api.Config, out *Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make([]NamedCluster, 0, 0) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make([]NamedAuthInfo, 0, 0) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make([]NamedContext, 0, 0) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make([]NamedExtension, 0, 0) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { + return err + } + (*out)[curr.Name] = newCluster + } + + return nil + }, + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := &Cluster{} + if err := s.Convert(newCluster, oldCluster, 0); err != nil { + return err + } + + namedCluster := 
NamedCluster{key, *oldCluster} + *out = append(*out, namedCluster) + } + + return nil + }, + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { + return err + } + (*out)[curr.Name] = newAuthInfo + } + + return nil + }, + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := &AuthInfo{} + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { + return err + } + + namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + + return nil + }, + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := s.Convert(&curr.Context, newContext, 0); err != nil { + return err + } + (*out)[curr.Name] = newContext + } + + return nil + }, + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newContext := (*in)[key] + oldContext := &Context{} + if err := s.Convert(newContext, oldContext, 0); err != nil { + return err + } + + namedContext := NamedContext{key, *oldContext} + *out = append(*out, namedContext) + } + + return nil + }, + func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { + return err + } + (*out)[curr.Name] = newExtension + } + + return nil + }, + func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := &runtime.RawExtension{} + if err := s.Convert(newExtension, oldExtension, 0); err != nil { + return err + } + + namedExtension := NamedExtension{key, *oldExtension} + *out = append(*out, namedExtension) + } + + return nil + }, + ) + if err != nil { + // If one of the conversion functions is malformed, detect it immediately. + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go new file mode 100644 index 00000000..e5c9e88e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go @@ -0,0 +1,40 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: "v1"} + +func init() { + api.Scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) +} + +func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { + return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go new file mode 100644 index 00000000..46b5dbaa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go @@ -0,0 +1,145 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/kubernetes/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters []NamedCluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos []NamedAuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts []NamedContext `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +type Preferences struct { + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // ClientCertificate is the path to a client cert file for TLS. + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + Token string `json:"token,omitempty"` + // Impersonate is the username to imperonate. The name matches the flag. + Impersonate string `json:"as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster Cluster `json:"cluster"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context Context `json:"context"` +} + +// NamedAuthInfo relates nicknames to auth information +type NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo AuthInfo `json:"user"` +} + +// NamedExtension relates nicknames to extension information +type NamedExtension struct { + // Name is the nickname for this Extension + Name string `json:"name"` + // Extension holds the extension information + Extension runtime.RawExtension `json:"extension"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go new file mode 100644 index 00000000..8b10ce2b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go @@ -0,0 +1,91 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + + clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" +) + +// AuthLoaders are used to build clientauth.Info objects. 
+type AuthLoader interface { + // LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info + LoadAuth(path string) (*clientauth.Info, error) +} + +// default implementation of an AuthLoader +type defaultAuthLoader struct{} + +// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile +func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { + return clientauth.LoadFromFile(path) +} + +type PromptingAuthLoader struct { + reader io.Reader +} + +// LoadAuth parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. +func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { + var auth clientauth.Info + // Prompt for user/pass and write a file if none exists. + if _, err := os.Stat(path); os.IsNotExist(err) { + auth = *a.Prompt() + data, err := json.Marshal(auth) + if err != nil { + return &auth, err + } + err = ioutil.WriteFile(path, data, 0600) + return &auth, err + } + authPtr, err := clientauth.LoadFromFile(path) + if err != nil { + return nil, err + } + return authPtr, nil +} + +// Prompt pulls the user and password from a reader +func (a *PromptingAuthLoader) Prompt() *clientauth.Info { + auth := &clientauth.Info{} + auth.User = promptForString("Username", a.reader) + auth.Password = promptForString("Password", a.reader) + + return auth +} + +func promptForString(field string, r io.Reader) string { + fmt.Printf("Please enter %s: ", field) + var result string + fmt.Fscan(r, &result) + return result +} + +// NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. +func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader { + return &PromptingAuthLoader{reader} +} + +// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file +func NewDefaultAuthLoader() AuthLoader { + return &defaultAuthLoader{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go new file mode 100644 index 00000000..c83f315a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go @@ -0,0 +1,411 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
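// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. Written as if it were inside this clientcmd package, it shows
// the two AuthLoader flavours defined above: the default loader only reads an
// existing file, while the prompting loader asks for a username and password on
// the given reader and writes the file when it is missing. The path is a
// placeholder.
func exampleLoadAuth(interactive bool) (*clientauth.Info, error) {
	const path = "/tmp/.kubernetes_auth" // placeholder location
	if !interactive {
		return NewDefaultAuthLoader().LoadAuth(path)
	}
	loader := NewPromptingAuthLoader(os.Stdin) // prompts on stdin, persists with mode 0600
	return loader.LoadAuth(path)
}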
+*/ + +package clientcmd + +import ( + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "strings" + + "github.com/golang/glog" + "github.com/imdario/mergo" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/restclient" + clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +var ( + // DefaultCluster is the cluster config used when no other config is specified + // TODO: eventually apiserver should start on 443 and be secure by default + DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"} + + // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name + EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} + + DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()} +) + +// ClientConfig is used to make it easy to get an api server client +type ClientConfig interface { + // RawConfig returns the merged result of all overrides + RawConfig() (clientcmdapi.Config, error) + // ClientConfig returns a complete client config + ClientConfig() (*restclient.Config, error) + // Namespace returns the namespace resulting from the merged + // result of all overrides and a boolean indicating if it was + // overridden + Namespace() (string, bool, error) + // ConfigAccess returns the rules for loading/persisting the config. + ConfigAccess() ConfigAccess +} + +type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister + +// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information +type DirectClientConfig struct { + config clientcmdapi.Config + contextName string + overrides *ConfigOverrides + fallbackReader io.Reader + configAccess ConfigAccess +} + +// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name +func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { + return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()} +} + +// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, nil, configAccess} +} + +// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess} +} + +func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { + return config.config, nil +} + +// ClientConfig implements ClientConfig +func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { + if err := config.ConfirmUsable(); err != nil { + return nil, err + } + + configAuthInfo := config.getAuthInfo() + configClusterInfo := config.getCluster() + + clientConfig := &restclient.Config{} + clientConfig.Host = 
configClusterInfo.Server + if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { + u.RawQuery = "" + u.Fragment = "" + clientConfig.Host = u.String() + } + if len(configAuthInfo.Impersonate) > 0 { + clientConfig.Impersonate = configAuthInfo.Impersonate + } + + // only try to read the auth information if we are secure + if restclient.IsConfigTransportTLS(*clientConfig) { + var err error + + // mergo is a first write wins for map value and a last writing wins for interface values + // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. + // Our mergo.Merge version is older than this change. + var persister restclient.AuthProviderConfigPersister + if config.configAccess != nil { + persister = PersisterForUser(config.configAccess, config.getAuthInfoName()) + } + userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + if err != nil { + return nil, err + } + mergo.Merge(clientConfig, userAuthPartialConfig) + + serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) + if err != nil { + return nil, err + } + mergo.Merge(clientConfig, serverAuthPartialConfig) + } + + return clientConfig, nil +} + +// clientauth.Info object contain both user identification and server identification. We want different precedence orders for +// both, so we have to split the objects and merge them separately +// we want this order of precedence for the server identification +// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) +// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) +// 3. load the ~/.kubernetes_auth file as a default +func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { + mergedConfig := &restclient.Config{} + + // configClusterInfo holds the information identify the server provided by .kubeconfig + configClientConfig := &restclient.Config{} + configClientConfig.CAFile = configClusterInfo.CertificateAuthority + configClientConfig.CAData = configClusterInfo.CertificateAuthorityData + configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify + mergo.Merge(mergedConfig, configClientConfig) + + return mergedConfig, nil +} + +// clientauth.Info object contain both user identification and server identification. We want different precedence orders for +// both, so we have to split the objects and merge them separately +// we want this order of precedence for user identifcation +// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) +// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) +// 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file +// 4. 
if there is not enough information to identify the user, prompt if possible +func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { + mergedConfig := &restclient.Config{} + + // blindly overwrite existing values based on precedence + if len(configAuthInfo.Token) > 0 { + mergedConfig.BearerToken = configAuthInfo.Token + } + if len(configAuthInfo.Impersonate) > 0 { + mergedConfig.Impersonate = configAuthInfo.Impersonate + } + if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { + mergedConfig.CertFile = configAuthInfo.ClientCertificate + mergedConfig.CertData = configAuthInfo.ClientCertificateData + mergedConfig.KeyFile = configAuthInfo.ClientKey + mergedConfig.KeyData = configAuthInfo.ClientKeyData + } + if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { + mergedConfig.Username = configAuthInfo.Username + mergedConfig.Password = configAuthInfo.Password + } + if configAuthInfo.AuthProvider != nil { + mergedConfig.AuthProvider = configAuthInfo.AuthProvider + mergedConfig.AuthConfigPersister = persistAuthConfig + } + + // if there still isn't enough information to authenticate the user, try prompting + if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { + prompter := NewPromptingAuthLoader(fallbackReader) + promptedAuthInfo := prompter.Prompt() + + promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) + previouslyMergedConfig := mergedConfig + mergedConfig = &restclient.Config{} + mergo.Merge(mergedConfig, promptedConfig) + mergo.Merge(mergedConfig, previouslyMergedConfig) + } + + return mergedConfig, nil +} + +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information +func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { + config := &restclient.Config{} + config.Username = info.User + config.Password = info.Password + config.CertFile = info.CertFile + config.KeyFile = info.KeyFile + config.BearerToken = info.BearerToken + return config +} + +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information +func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { + config := restclient.Config{} + config.CAFile = info.CAFile + if info.Insecure != nil { + config.Insecure = *info.Insecure + } + return config +} + +func canIdentifyUser(config restclient.Config) bool { + return len(config.Username) > 0 || + (len(config.CertFile) > 0 || len(config.CertData) > 0) || + len(config.BearerToken) > 0 || + config.AuthProvider != nil +} + +// Namespace implements ClientConfig +func (config *DirectClientConfig) Namespace() (string, bool, error) { + if err := config.ConfirmUsable(); err != nil { + return "", false, err + } + + configContext := config.getContext() + + if len(configContext.Namespace) == 0 { + return api.NamespaceDefault, false, nil + } + + overridden := false + if config.overrides != nil && config.overrides.Context.Namespace != "" { + overridden = true + } + return configContext.Namespace, overridden, nil +} + +// ConfigAccess implements ClientConfig +func (config *DirectClientConfig) ConfigAccess() ConfigAccess { + return config.configAccess +} + +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. 
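Editor's note: the user/server split above relies on the merge semantics of the vendored (pre-d304790) mergo described in these comments: a later mergo.Merge overwrites non-map struct fields that are non-empty in the source, while map keys that already exist keep their first value (see the Load() comment in loader.go later in this patch). A minimal, editor-added sketch of that behavior, not part of the vendored file, with hypothetical field names:

	// Sketch only: demonstrates the older mergo semantics this package relies on.
	package main

	import (
		"fmt"

		"github.com/imdario/mergo"
	)

	type serverInfo struct {
		Server   string
		Username string
	}

	func main() {
		merged := &serverInfo{}
		fromDefaults := serverInfo{Server: "http://localhost:8080"}
		fromKubeconfig := serverInfo{Server: "https://example.invalid:6443", Username: "admin"}

		// With the mergo version vendored here, the later merge overwrites the
		// non-empty Server field; Username is simply filled in.
		mergo.Merge(merged, fromDefaults)
		mergo.Merge(merged, fromKubeconfig)

		fmt.Printf("%+v\n", merged) // {Server:https://example.invalid:6443 Username:admin}
	}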
There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func (config *DirectClientConfig) ConfirmUsable() error { + validationErrors := make([]error, 0) + validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...) + validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) + // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +func (config *DirectClientConfig) getContextName() string { + if len(config.overrides.CurrentContext) != 0 { + return config.overrides.CurrentContext + } + if len(config.contextName) != 0 { + return config.contextName + } + + return config.config.CurrentContext +} + +func (config *DirectClientConfig) getAuthInfoName() string { + if len(config.overrides.Context.AuthInfo) != 0 { + return config.overrides.Context.AuthInfo + } + return config.getContext().AuthInfo +} + +func (config *DirectClientConfig) getClusterName() string { + if len(config.overrides.Context.Cluster) != 0 { + return config.overrides.Context.Cluster + } + return config.getContext().Cluster +} + +func (config *DirectClientConfig) getContext() clientcmdapi.Context { + contexts := config.config.Contexts + contextName := config.getContextName() + + var mergedContext clientcmdapi.Context + if configContext, exists := contexts[contextName]; exists { + mergo.Merge(&mergedContext, configContext) + } + mergo.Merge(&mergedContext, config.overrides.Context) + + return mergedContext +} + +func (config *DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo { + authInfos := config.config.AuthInfos + authInfoName := config.getAuthInfoName() + + var mergedAuthInfo clientcmdapi.AuthInfo + if configAuthInfo, exists := authInfos[authInfoName]; exists { + mergo.Merge(&mergedAuthInfo, configAuthInfo) + } + mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo) + + return mergedAuthInfo +} + +func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { + clusterInfos := config.config.Clusters + clusterInfoName := config.getClusterName() + + var mergedClusterInfo clientcmdapi.Cluster + mergo.Merge(&mergedClusterInfo, DefaultCluster) + mergo.Merge(&mergedClusterInfo, EnvVarCluster) + if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { + mergo.Merge(&mergedClusterInfo, configClusterInfo) + } + mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) + // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data + // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" + caLen := len(config.overrides.ClusterInfo.CertificateAuthority) + caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) + if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { + mergedClusterInfo.CertificateAuthority = "" + mergedClusterInfo.CertificateAuthorityData = nil + } + + return mergedClusterInfo +} + +// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. 
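Editor's note: an editor-added usage sketch (not part of the vendored file) showing the cluster precedence that getCluster() above implements: a ConfigOverrides value wins over the kubeconfig cluster, which in turn wins over DefaultCluster/EnvVarCluster. The context and server names are hypothetical.

	package main

	import (
		"fmt"

		"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
		clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
	)

	func main() {
		// An in-memory kubeconfig with a single cluster/context (hypothetical names).
		kubeconfig := clientcmdapi.Config{
			Clusters: map[string]*clientcmdapi.Cluster{
				"local": {Server: "http://localhost:8080"},
			},
			Contexts: map[string]*clientcmdapi.Context{
				"local": {Cluster: "local"},
			},
			CurrentContext: "local",
		}

		// A --server style override takes precedence over the kubeconfig value.
		overrides := &clientcmd.ConfigOverrides{
			ClusterInfo: clientcmdapi.Cluster{Server: "https://example.invalid:6443"},
		}

		restConfig, err := clientcmd.NewDefaultClientConfig(kubeconfig, overrides).ClientConfig()
		if err != nil {
			panic(err)
		}
		fmt.Println(restConfig.Host) // https://example.invalid:6443
	}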
+type inClusterClientConfig struct{} + +func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { + return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") +} + +func (inClusterClientConfig) ClientConfig() (*restclient.Config, error) { + return restclient.InClusterConfig() +} + +func (inClusterClientConfig) Namespace() (string, error) { + // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. + // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up + if ns := os.Getenv("POD_NAMESPACE"); ns != "" { + return ns, nil + } + + // Fall back to the namespace associated with the service account token, if available + if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + return ns, nil + } + } + + return "default", nil +} + +func (inClusterClientConfig) ConfigAccess() ConfigAccess { + return NewDefaultClientConfigLoadingRules() +} + +// Possible returns true if loading an inside-kubernetes-cluster is possible. +func (inClusterClientConfig) Possible() bool { + fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") + return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && + os.Getenv("KUBERNETES_SERVICE_PORT") != "" && + err == nil && !fi.IsDir() +} + +// BuildConfigFromFlags is a helper function that builds configs from a master +// url or a kubeconfig filepath. These are passed in as command line flags for cluster +// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath +// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback +// to the default config. +func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { + if kubeconfigPath == "" && masterUrl == "" { + glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + kubeconfig, err := restclient.InClusterConfig() + if err == nil { + return kubeconfig, nil + } + glog.Warning("error creating inClusterConfig, falling back to default config: ", err) + } + return NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() +} + +// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master +// url and a kubeconfigGetter. +func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { + // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. + cc := NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) + return cc.ClientConfig() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go new file mode 100644 index 00000000..049fc392 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go @@ -0,0 +1,419 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "os" + "path" + "path/filepath" + "reflect" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/client/restclient" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files +type ConfigAccess interface { + // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config + GetLoadingPrecedence() []string + // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules + GetStartingConfig() (*clientcmdapi.Config, error) + // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. + GetDefaultFilename() string + // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more + IsExplicitFile() bool + // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more + GetExplicitFile() string +} + +type PathOptions struct { + // GlobalFile is the full path to the file to load as the global (final) option + GlobalFile string + // EnvVar is the env var name that points to the list of kubeconfig files to load + EnvVar string + // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file + ExplicitFileFlag string + + // GlobalFileSubpath is an optional value used for displaying help + GlobalFileSubpath string + + LoadingRules *ClientConfigLoadingRules +} + +func (o *PathOptions) GetEnvVarFiles() []string { + if len(o.EnvVar) == 0 { + return []string{} + } + + envVarValue := os.Getenv(o.EnvVar) + if len(envVarValue) == 0 { + return []string{} + } + + return filepath.SplitList(envVarValue) +} + +func (o *PathOptions) GetLoadingPrecedence() []string { + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + return envVarFiles + } + + return []string{o.GlobalFile} +} + +func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { + // don't mutate the original + loadingRules := *o.LoadingRules + loadingRules.Precedence = o.GetLoadingPrecedence() + + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +func (o *PathOptions) GetDefaultFilename() string { + if o.IsExplicitFile() { + return o.GetExplicitFile() + } + + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + if len(envVarFiles) == 1 { + return envVarFiles[0] + } + + // if any of the envvar files already exists, return it + for _, envVarFile := range envVarFiles { + if _, 
err := os.Stat(envVarFile); err == nil { + return envVarFile + } + } + + // otherwise, return the last one in the list + return envVarFiles[len(envVarFiles)-1] + } + + return o.GlobalFile +} + +func (o *PathOptions) IsExplicitFile() bool { + if len(o.LoadingRules.ExplicitPath) > 0 { + return true + } + + return false +} + +func (o *PathOptions) GetExplicitFile() string { + return o.LoadingRules.ExplicitPath +} + +func NewDefaultPathOptions() *PathOptions { + ret := &PathOptions{ + GlobalFile: RecommendedHomeFile, + EnvVar: RecommendedConfigPathEnvVar, + ExplicitFileFlag: RecommendedConfigPathFlag, + + GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + + LoadingRules: NewDefaultClientConfigLoadingRules(), + } + ret.LoadingRules.DoNotResolvePaths = true + + return ret +} + +// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or +// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. +// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values +// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, +// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any +// modified element. +func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { + startingConfig, err := configAccess.GetStartingConfig() + if err != nil { + return err + } + + // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. + // Special case the test for current context and preferences since those always write to the default file. + if reflect.DeepEqual(*startingConfig, newConfig) { + // nothing to do + return nil + } + + if startingConfig.CurrentContext != newConfig.CurrentContext { + if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { + return err + } + } + + if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { + if err := writePreferences(configAccess, newConfig.Preferences); err != nil { + return err + } + } + + // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + t := *cluster + + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + configToWrite.Contexts[key] = context + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.Clusters, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.Contexts, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite := GetConfigFromFileOrDie(destinationFile) + delete(configToWrite.AuthInfos, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + return nil +} + +func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { + return &persister{configAccess, user} +} + +type persister struct { + 
configAccess ConfigAccess + user string +} + +func (p *persister) Persist(config map[string]string) error { + newConfig, err := p.configAccess.GetStartingConfig() + if err != nil { + return err + } + authInfo, ok := newConfig.AuthInfos[p.user] + if ok && authInfo.AuthProvider != nil { + authInfo.AuthProvider.Config = config + ModifyConfig(p.configAccess, *newConfig, false) + } + return nil +} + +// writeCurrentContext takes three possible paths. +// If newCurrentContext is the same as the startingConfig's current context, then we exit. +// If newCurrentContext has a value, then that value is written into the default destination file. +// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file +func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if startingConfig.CurrentContext == newCurrentContext { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig := GetConfigFromFileOrDie(file) + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + if len(newCurrentContext) > 0 { + destinationFile := configAccess.GetDefaultFilename() + config := GetConfigFromFileOrDie(destinationFile) + config.CurrentContext = newCurrentContext + + if err := WriteToFile(*config, destinationFile); err != nil { + return err + } + + return nil + } + + // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig := GetConfigFromFileOrDie(file) + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig := GetConfigFromFileOrDie(file) + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig := GetConfigFromFileOrDie(file) + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. 
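Editor's note: an editor-added sketch (not part of the vendored file) of the ModifyConfig flow described above: read the starting config through a ConfigAccess, change one field, and let ModifyConfig write only the file that owns that field. The context name is hypothetical, and running this would rewrite the caller's ~/.kube/config.

	package main

	import (
		"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	)

	func main() {
		pathOptions := clientcmd.NewDefaultPathOptions()

		startingConfig, err := pathOptions.GetStartingConfig()
		if err != nil {
			panic(err)
		}

		// Only CurrentContext differs from the starting config, so ModifyConfig
		// routes the change through writeCurrentContext and touches a single file.
		startingConfig.CurrentContext = "my-other-context" // hypothetical
		if err := clientcmd.ModifyConfig(pathOptions, *startingConfig, false); err != nil {
			panic(err)
		}
	}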
One exception, missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + glog.FatalDepth(1, err) + } + + if config == nil { + return clientcmdapi.NewConfig() + } + + return config +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go new file mode 100644 index 00000000..7e8f9b4e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package clientcmd provides one stop shopping for building a working client from a fixed config, +from a .kubeconfig file, from command line flags, or from any merged combination. + +Sample usage from merged .kubeconfig files (local directory, home directory) + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // if you want to change the loading rules (which files in which order), you can do so here + + configOverrides := &clientcmd.ConfigOverrides{} + // if you want to change override values or bind them to flags, there are methods to help you + + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + if err != nil { + // Do something + } + client, err := unversioned.New(config) + // ... +*/ +package clientcmd diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go new file mode 100644 index 00000000..3d2df1f7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go @@ -0,0 +1,580 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientcmd + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + goruntime "runtime" + "strings" + + "github.com/golang/glog" + "github.com/imdario/mergo" + + "k8s.io/kubernetes/pkg/api/unversioned" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/homedir" +) + +const ( + RecommendedConfigPathFlag = "kubeconfig" + RecommendedConfigPathEnvVar = "KUBECONFIG" + RecommendedHomeDir = ".kube" + RecommendedFileName = "config" + RecommendedSchemaName = "schema" +) + +var RecommendedHomeFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedFileName) +var RecommendedSchemaFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedSchemaName) + +// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. +// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make +// sure existing config files are migrated to their new locations properly. +func currentMigrationRules() map[string]string { + oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig") + oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName) + + migrationRules := map[string]string{} + migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile + if goruntime.GOOS == "windows" { + migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile + } + return migrationRules +} + +type ClientConfigLoader interface { + ConfigAccess + Load() (*clientcmdapi.Config, error) +} + +type KubeconfigGetter func() (*clientcmdapi.Config, error) + +type ClientConfigGetter struct { + kubeconfigGetter KubeconfigGetter +} + +// ClientConfigGetter implements the ClientConfigLoader interface. +var _ ClientConfigLoader = &ClientConfigGetter{} + +func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) { + return g.kubeconfigGetter() +} + +func (g *ClientConfigGetter) GetLoadingPrecedence() []string { + return nil +} +func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) { + return nil, nil +} +func (g *ClientConfigGetter) GetDefaultFilename() string { + return "" +} +func (g *ClientConfigGetter) IsExplicitFile() bool { + return false +} +func (g *ClientConfigGetter) GetExplicitFile() string { + return "" +} + +// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config +// Callers can put the chain together however they want, but we'd recommend: +// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath +// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if thie file is not present +type ClientConfigLoadingRules struct { + ExplicitPath string + Precedence []string + + // MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked. + // If the source file is present, then it is copied to the destination file BEFORE any further loading happens. + MigrationRules map[string]string + + // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. 
This is phrased as a negative so + // that a default object that doesn't set this will usually get the behavior it wants. + DoNotResolvePaths bool +} + +// ClientConfigLoadingRules implements the ClientConfigLoader interface. +var _ ClientConfigLoader = &ClientConfigLoadingRules{} + +// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to +// use this constructor +func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { + chain := []string{} + + envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) + if len(envVarFiles) != 0 { + chain = append(chain, filepath.SplitList(envVarFiles)...) + + } else { + chain = append(chain, RecommendedHomeFile) + } + + return &ClientConfigLoadingRules{ + Precedence: chain, + MigrationRules: currentMigrationRules(), + } +} + +// Load starts by running the MigrationRules and then +// takes the loading rules and returns a Config object based on following rules. +// if the ExplicitPath, return the unmerged explicit file +// Otherwise, return a merged config based on the Precedence slice +// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. +// Read errors or files with non-deserializable content produce errors. +// The first file to set a particular map key wins and map key's value is never changed. +// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. +// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. +// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even +// non-conflicting entries from the second file's "red-user" are discarded. +// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder +// and only absolute file paths are returned. +func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { + if err := rules.Migrate(); err != nil { + return nil, err + } + + errlist := []error{} + + kubeConfigFiles := []string{} + + // Make sure a file we were explicitly told to use exists + if len(rules.ExplicitPath) > 0 { + if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) { + return nil, err + } + kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath) + + } else { + kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) 
+ } + + kubeconfigs := []*clientcmdapi.Config{} + // read and cache the config files so that we only look at them once + for _, filename := range kubeConfigFiles { + if len(filename) == 0 { + // no work to do + continue + } + + config, err := LoadFromFile(filename) + if os.IsNotExist(err) { + // skip missing files + continue + } + if err != nil { + errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err)) + continue + } + + kubeconfigs = append(kubeconfigs, config) + } + + // first merge all of our maps + mapConfig := clientcmdapi.NewConfig() + for _, kubeconfig := range kubeconfigs { + mergo.Merge(mapConfig, kubeconfig) + } + + // merge all of the struct values in the reverse order so that priority is given correctly + // errors are not added to the list the second time + nonMapConfig := clientcmdapi.NewConfig() + for i := len(kubeconfigs) - 1; i >= 0; i-- { + kubeconfig := kubeconfigs[i] + mergo.Merge(nonMapConfig, kubeconfig) + } + + // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and + // get the values we expect. + config := clientcmdapi.NewConfig() + mergo.Merge(config, mapConfig) + mergo.Merge(config, nonMapConfig) + + if rules.ResolvePaths() { + if err := ResolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + } + + return config, utilerrors.NewAggregate(errlist) +} + +// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked. +// If the source file is present, then it is copied to the destination file BEFORE any further loading happens. +func (rules *ClientConfigLoadingRules) Migrate() error { + if rules.MigrationRules == nil { + return nil + } + + for destination, source := range rules.MigrationRules { + if _, err := os.Stat(destination); err == nil { + // if the destination already exists, do nothing + continue + } else if !os.IsNotExist(err) { + // if we had an error other than non-existence, fail + return err + } + + if sourceInfo, err := os.Stat(source); err != nil { + if os.IsNotExist(err) { + // if the source file doesn't exist, there's no work to do. + continue + } + + // if we had an error other than non-existence, fail + return err + } else if sourceInfo.IsDir() { + return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) + } + + in, err := os.Open(source) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(destination) + if err != nil { + return err + } + defer out.Close() + + if _, err = io.Copy(out, in); err != nil { + return err + } + } + + return nil +} + +// GetLoadingPrecedence implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + return rules.Precedence +} + +// GetStartingConfig implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +// GetDefaultFilename implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { + // Explicit file if we have one. + if rules.IsExplicitFile() { + return rules.GetExplicitFile() + } + // Otherwise, first existing file from precedence. 
+ for _, filename := range rules.GetLoadingPrecedence() { + if _, err := os.Stat(filename); err == nil { + return filename + } + } + // If none exists, use the first from precedence. + if len(rules.Precedence) > 0 { + return rules.Precedence[0] + } + return "" +} + +// IsExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) IsExplicitFile() bool { + return len(rules.ExplicitPath) > 0 +} + +// GetExplicitFile implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetExplicitFile() string { + return rules.ExplicitPath +} + +// LoadFromFile takes a filename and deserializes the contents into Config object +func LoadFromFile(filename string) (*clientcmdapi.Config, error) { + kubeconfigBytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := Load(kubeconfigBytes) + if err != nil { + return nil, err + } + glog.V(6).Infoln("Config loaded from file", filename) + + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdapi.Cluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdapi.Context{} + } + + return config, nil +} + +// Load takes a byte slice and deserializes the contents into Config object. +// Encapsulates deserialization without assuming the source is a file. +func Load(data []byte) (*clientcmdapi.Config, error) { + config := clientcmdapi.NewConfig() + // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) + if len(data) == 0 { + return config, nil + } + decoded, _, err := clientcmdlatest.Codec.Decode(data, &unversioned.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) + if err != nil { + return nil, err + } + return decoded.(*clientcmdapi.Config), nil +} + +// WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with the mode 0600. If it is present +// it stomps the contents +func WriteToFile(config clientcmdapi.Config, filename string) error { + content, err := Write(config) + if err != nil { + return err + } + dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + err = lockFile(filename) + if err != nil { + return err + } + defer unlockFile(filename) + + if err := ioutil.WriteFile(filename, content, 0600); err != nil { + return err + } + return nil +} + +func lockFile(filename string) error { + // TODO: find a way to do this with actual file locks. Will + // probably need seperate solution for windows and linux. + f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) + if err != nil { + return err + } + f.Close() + return nil +} + +func unlockFile(filename string) error { + return os.Remove(lockName(filename)) +} + +func lockName(filename string) string { + return filename + ".lock" +} + +// Write serializes the config to yaml. +// Encapsulates serialization without assuming the destination is a file. 
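Editor's note: an editor-added sketch (not part of the vendored file) exercising the loading rules documented at Load(): KUBECONFIG, when set, supplies the file list (colon-separated on Linux/macOS); otherwise ~/.kube/config is used; missing files are skipped and the remaining files are merged with the first-key-wins map semantics described above. The paths are hypothetical.

	package main

	import (
		"fmt"
		"os"

		"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	)

	func main() {
		// Hypothetical paths; for conflicting map keys (e.g. a cluster named "prod"
		// defined in both files) the earlier file in the list wins.
		os.Setenv("KUBECONFIG", "/tmp/kubeconfig-a:/tmp/kubeconfig-b")

		rules := clientcmd.NewDefaultClientConfigLoadingRules()
		merged, err := rules.Load()
		if err != nil {
			panic(err)
		}

		fmt.Println("current context:", merged.CurrentContext)
		for name := range merged.Clusters {
			fmt.Println("cluster:", name)
		}
	}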
+func Write(config clientcmdapi.Config) ([]byte, error) { + return runtime.Encode(clientcmdlatest.Codec, &config) +} + +func (rules ClientConfigLoadingRules) ResolvePaths() bool { + return !rules.DoNotResolvePaths +} + +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func ResolveLocalPaths(config *clientcmdapi.Config) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { + if len(cluster.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %s", cluster.Server) + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { + return err + } + + return nil +} + +// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { + if len(authInfo.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %v", authInfo) + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + + return nil +} + +func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { + return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) +} + +func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { + return ResolvePaths(GetConfigFileReferences(config), base) +} + +func GetConfigFileReferences(config *clientcmdapi.Config) []*string { + refs := []*string{} + + for _, cluster := range config.Clusters { + refs = append(refs, GetClusterFileReferences(cluster)...) + } + for _, authInfo := range config.AuthInfos { + refs = append(refs, GetAuthInfoFileReferences(authInfo)...) + } + + return refs +} + +func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func ResolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. +// Any path requiring a backstep is left as-is as long it is absolute. Any non-absolute path that can't be relativized produces an error +func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths + if len(*ref) > 0 { + rel, err := MakeRelative(*ref, base) + if err != nil { + return err + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} + +func MakeRelative(path, base string) (string, error) { + if len(path) > 0 { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go new file mode 100644 index 00000000..52c1493d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go @@ -0,0 +1,122 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "io" + "reflect" + "sync" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/client/restclient" + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type DeferredLoadingClientConfig struct { + loader ClientConfigLoader + overrides *ConfigOverrides + fallbackReader io.Reader + + clientConfig ClientConfig + loadingLock sync.Mutex +} + +// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides} +} + +// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, fallbackReader: fallbackReader} +} + +func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { + if config.clientConfig == nil { + config.loadingLock.Lock() + defer config.loadingLock.Unlock() + + if config.clientConfig == nil { + mergedConfig, err := config.loader.Load() + if err != nil { + return nil, err + } + + var mergedClientConfig ClientConfig + if config.fallbackReader != nil { + mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader) + } else { + mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader) + } + + config.clientConfig = mergedClientConfig + } + } + + return config.clientConfig, nil +} + +func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { + mergedConfig, err := config.createClientConfig() + if err != nil { + return clientcmdapi.Config{}, err + } + + return mergedConfig.RawConfig() +} + +// ClientConfig implements ClientConfig +func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { + mergedClientConfig, err := config.createClientConfig() + if err != nil { + return nil, err + } + + mergedConfig, err := mergedClientConfig.ClientConfig() + if err != nil { + return nil, err + } + // Are we running in a cluster and were no other configs 
found? If so, use the in-cluster-config. + icc := inClusterClientConfig{} + defaultConfig, err := DefaultClientConfig.ClientConfig() + if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) { + glog.V(2).Info("No kubeconfig could be created, falling back to service account.") + return icc.ClientConfig() + } + return mergedConfig, nil +} + +// Namespace implements KubeConfig +func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { + mergedKubeConfig, err := config.createClientConfig() + if err != nil { + return "", false, err + } + + return mergedKubeConfig.Namespace() +} + +// ConfigAccess implements ClientConfig +func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { + return config.loader +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go new file mode 100644 index 00000000..f6dda97f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "strconv" + + "github.com/spf13/pflag" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" +) + +// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't +// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one" +type ConfigOverrides struct { + AuthInfo clientcmdapi.AuthInfo + ClusterInfo clientcmdapi.Cluster + Context clientcmdapi.Context + CurrentContext string +} + +// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly +// corresponds to ConfigOverrides +type ConfigOverrideFlags struct { + AuthOverrideFlags AuthOverrideFlags + ClusterOverrideFlags ClusterOverrideFlags + ContextOverrideFlags ContextOverrideFlags + CurrentContext FlagInfo +} + +// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects +type AuthOverrideFlags struct { + ClientCertificate FlagInfo + ClientKey FlagInfo + Token FlagInfo + Impersonate FlagInfo + Username FlagInfo + Password FlagInfo +} + +// ContextOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects +type ContextOverrideFlags struct { + ClusterName FlagInfo + AuthInfoName FlagInfo + Namespace FlagInfo +} + +// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects +type ClusterOverrideFlags struct { + APIServer FlagInfo + APIVersion FlagInfo + CertificateAuthority FlagInfo + InsecureSkipTLSVerify FlagInfo +} + +// FlagInfo contains information about how to register a flag. 
This struct is useful if you want to provide a way for an extender to +// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender. This makes for +// coherent extension, without full prescription +type FlagInfo struct { + // LongName is the long string for a flag. If this is empty, then the flag will not be bound + LongName string + // ShortName is the single character for a flag. If this is empty, then there will be no short flag + ShortName string + // Default is the default value for the flag + Default string + // Description is the description for the flag + Description string +} + +// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description) + } +} + +// BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered +func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + // try to parse Default as a bool. If it fails, assume false + boolVal, err := strconv.ParseBool(f.Default) + if err != nil { + boolVal = false + } + + flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) + } +} + +const ( + FlagClusterName = "cluster" + FlagAuthInfoName = "user" + FlagContext = "context" + FlagNamespace = "namespace" + FlagAPIServer = "server" + FlagAPIVersion = "api-version" + FlagInsecure = "insecure-skip-tls-verify" + FlagCertFile = "client-certificate" + FlagKeyFile = "client-key" + FlagCAFile = "certificate-authority" + FlagEmbedCerts = "embed-certs" + FlagBearerToken = "token" + FlagImpersonate = "as" + FlagUsername = "username" + FlagPassword = "password" +) + +// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { + return AuthOverrideFlags{ + ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS."}, + ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS."}, + Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server."}, + Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation."}, + Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server."}, + Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server."}, + } +} + +// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { + return ClusterOverrideFlags{ + APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"}, + APIVersion: FlagInfo{prefix + FlagAPIVersion, "", "", "DEPRECATED: The API version to use when talking to the server"}, + CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert. file for the certificate authority."}, + InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure."}, + } +} + +// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { + return ConfigOverrideFlags{ + AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), + ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), + ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + } +} + +// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { + return ContextOverrideFlags{ + ClusterName: FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"}, + AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"}, + Namespace: FlagInfo{prefix + FlagNamespace, "", "", "If present, the namespace scope for this CLI request."}, + } +} + +// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables +func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) { + flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate) + flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey) + flagNames.Token.BindStringFlag(flags, &authInfo.Token) + flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate) + flagNames.Username.BindStringFlag(flags, &authInfo.Username) + flagNames.Password.BindStringFlag(flags, &authInfo.Password) +} + +// BindClusterFlags is a convenience method to bind the specified flags to their associated variables +func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) { + flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server) + // TODO: remove --api-version flag in 1.3. 
+ flagNames.APIVersion.BindStringFlag(flags, &clusterInfo.APIVersion) + flags.MarkDeprecated(FlagAPIVersion, "flag is no longer respected and will be deleted in the next release") + flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority) + flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) +} + +// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables +func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { + BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) + BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) + BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) + flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) +} + +// BindContextFlags is a convenience method to bind the specified flags to their associated variables +func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) { + flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster) + flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo) + flagNames.Namespace.BindStringFlag(flags, &contextInfo.Namespace) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go new file mode 100644 index 00000000..1690f515 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go @@ -0,0 +1,270 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + + clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/validation" +) + +var ( + ErrNoContext = errors.New("no context chosen") + ErrEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + ErrEmptyCluster = errors.New("cluster has no server defined") +) + +type errContextNotFound struct { + ContextName string +} + +func (e *errContextNotFound) Error() string { + return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) +} + +// IsContextNotFound returns a boolean indicating whether the error is known to +// report that a context was not found +func IsContextNotFound(err error) bool { + if err == nil { + return false + } + if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { + return true + } + return strings.Contains(err.Error(), "context was not found for specified context") +} + +// IsEmptyConfig returns true if the provided error indicates the provided configuration +// is empty.
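For orientation, here is a minimal, illustrative sketch of how a CLI might wire the override-flag helpers above (RecommendedConfigOverrideFlags and BindOverrideFlags) into a pflag.FlagSet. The flag-set name and the empty prefix are assumptions for the example, not part of the vendored code.

package main

import (
	"os"

	"github.com/spf13/pflag"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)

	// Collect the recommended flag names/descriptions and bind them to an overrides struct.
	overrides := &clientcmd.ConfigOverrides{}
	names := clientcmd.RecommendedConfigOverrideFlags("")
	clientcmd.BindOverrideFlags(overrides, flags, names)

	// After parsing, overrides.CurrentContext, overrides.ClusterInfo.Server, etc. carry the flag values.
	_ = flags.Parse(os.Args[1:])
}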
+func IsEmptyConfig(err error) bool { + switch t := err.(type) { + case errConfigurationInvalid: + return len(t) == 1 && t[0] == ErrEmptyConfig + } + return err == ErrEmptyConfig +} + +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +// errConfigurationInvalid implements error and Aggregate +var _ error = errConfigurationInvalid{} +var _ utilerrors.Aggregate = errConfigurationInvalid{} + +func newErrConfigurationInvalid(errs []error) error { + switch len(errs) { + case 0: + return nil + default: + return errConfigurationInvalid(errs) + } +} + +// Error implements the error interface +func (e errConfigurationInvalid) Error() string { + return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) +} + +// Errors implements the AggregateError interface +func (e errConfigurationInvalid) Errors() []error { + return e +} + +// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. +func IsConfigurationInvalid(err error) bool { + switch err.(type) { + case *errContextNotFound, errConfigurationInvalid: + return true + } + return IsContextNotFound(err) +} + +// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. +func Validate(config clientcmdapi.Config) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + if len(config.CurrentContext) != 0 { + if _, exists := config.Contexts[config.CurrentContext]; !exists { + validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) + } + } + + for contextName, context := range config.Contexts { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + } + + for authInfoName, authInfo := range config.AuthInfos { + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) + } + + for clusterName, clusterInfo := range config.Clusters { + validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + var contextName string + if len(passedContextName) != 0 { + contextName = passedContextName + } else { + contextName = config.CurrentContext + } + + if len(contextName) == 0 { + return ErrNoContext + } + + context, exists := config.Contexts[contextName] + if !exists { + validationErrors = append(validationErrors, &errContextNotFound{contextName}) + } + + if exists { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
+ } + + return newErrConfigurationInvalid(validationErrors) +} + +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { + validationErrors := make([]error, 0) + + if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) { + return []error{ErrEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { + validationErrors := make([]error, 0) + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + defer clientCertFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + defer clientKeyFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return +func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { + validationErrors := make([]error, 0) + + if len(context.AuthInfo) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) + } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) + } + + if len(context.Cluster) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) + } else if _, exists := config.Clusters[context.Cluster]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) + } + + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } + } + + return validationErrors +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go new file mode 100644 index 00000000..2a9d7984 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoleBindings has methods to work with ClusterRoleBinding resources in a namespace +type ClusterRoleBindings interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) + Get(name string) (*rbac.ClusterRoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + Create(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Update(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// clusterRoleBindings implements ClusterRoleBindingsNamespacer interface +type clusterRoleBindings struct { + client *RbacClient +} + +// newClusterRoleBindings returns a clusterRoleBindings +func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c, + } +} + +// List takes label and field selectors, and returns the list of clusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { + result = &rbac.ClusterRoleBindingList{} + err = c.client.Get().Resource("clusterrolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the clusterRoleBinding, and returns the corresponding ClusterRoleBinding object, and an error if it occurs +func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Get().Resource("clusterrolebindings").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Resource("clusterrolebindings").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs. +func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Post().Resource("clusterrolebindings").Body(clusterRoleBinding).Do().Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs. 
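As a minimal sketch, a caller can code against the ClusterRoleBindingInterface defined above rather than the concrete client, which keeps the logic testable with fakes; the package and function names here are illustrative, not part of the vendored code.

package rbacexample

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// bindingNames returns the names of all ClusterRoleBindings visible to the given client.
func bindingNames(c client.ClusterRoleBindingInterface) ([]string, error) {
	list, err := c.List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, binding := range list.Items {
		names = append(names, binding.Name)
	}
	return names, nil
}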
+func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { + result = &rbac.ClusterRoleBinding{} + err = c.client.Put().Resource("clusterrolebindings").Name(clusterRoleBinding.Name).Body(clusterRoleBinding).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go new file mode 100644 index 00000000..0d2d375d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// ClusterRoles has methods to work with ClusterRole resources in a namespace +type ClusterRoles interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + List(opts api.ListOptions) (*rbac.ClusterRoleList, error) + Get(name string) (*rbac.ClusterRole, error) + Delete(name string, options *api.DeleteOptions) error + Create(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) + Update(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// clusterRoles implements ClusterRolesNamespacer interface +type clusterRoles struct { + client *RbacClient +} + +// newClusterRoles returns a clusterRoles +func newClusterRoles(c *RbacClient) *clusterRoles { + return &clusterRoles{ + client: c, + } +} + +// List takes label and field selectors, and returns the list of clusterRoles that match those selectors. +func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { + result = &rbac.ClusterRoleList{} + err = c.client.Get().Resource("clusterroles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the clusterRole, and returns the corresponding ClusterRole object, and an error if it occurs +func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Get().Resource("clusterroles").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Resource("clusterroles").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a clusterRole and creates it. 
Returns the server's representation of the clusterRole, and an error, if it occurs. +func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Post().Resource("clusterroles").Body(clusterRole).Do().Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if it occurs. +func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + result = &rbac.ClusterRole{} + err = c.client.Put().Resource("clusterroles").Name(clusterRole.Name).Body(clusterRole).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go new file mode 100644 index 00000000..0717cdec --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" +) + +type ComponentStatusesInterface interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface contains methods to retrieve ComponentStatus +type ComponentStatusInterface interface { + List(opts api.ListOptions) (*api.ComponentStatusList, error) + Get(name string) (*api.ComponentStatus, error) + + // TODO: It'd be nice to have watch support at some point + //Watch(opts api.ListOptions) (watch.Interface, error) +} + +// componentStatuses implements ComponentStatusesInterface +type componentStatuses struct { + client *Client +} + +func newComponentStatuses(c *Client) *componentStatuses { + return &componentStatuses{c} +} + +func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { + result = &api.ComponentStatusList{} + err = c.client.Get(). + Resource("componentStatuses"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { + result = &api.ComponentStatus{} + err = c.client.Get().Resource("componentStatuses").Name(name).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go new file mode 100644 index 00000000..f68f98fe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" +) + +// ControllerHasDesiredReplicas returns a condition that will be true if and only if +// the desired replica count for a controller's ReplicaSelector equals the Replicas count. +func ControllerHasDesiredReplicas(c Interface, controller *api.ReplicationController) wait.ConditionFunc { + + // If we're given a controller where the status lags the spec, it either means that the controller is stale, + // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. + desiredGeneration := controller.Generation + + return func() (bool, error) { + ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name) + if err != nil { + return false, err + } + // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, + // or, after this check has passed, a modification causes the rc manager to create more pods. + // This will not be an issue once we've implemented graceful delete for rcs, but till then + // concurrent stop operations on the same rc might have unintended side effects. + return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil + } +} + +// ReplicaSetHasDesiredReplicas returns a condition that will be true if and only if +// the desired replica count for a ReplicaSet's ReplicaSelector equals the Replicas count. +func ReplicaSetHasDesiredReplicas(c ExtensionsInterface, replicaSet *extensions.ReplicaSet) wait.ConditionFunc { + + // If we're given a ReplicaSet where the status lags the spec, it either means that the + // ReplicaSet is stale, or that the ReplicaSet manager hasn't noticed the update yet. + // Polling status.Replicas is not safe in the latter case. + desiredGeneration := replicaSet.Generation + + return func() (bool, error) { + rs, err := c.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name) + if err != nil { + return false, err + } + // There's a chance a concurrent update modifies the Spec.Replicas causing this check to + // pass, or, after this check has passed, a modification causes the ReplicaSet manager to + // create more pods. This will not be an issue once we've implemented graceful delete for + // ReplicaSets, but till then concurrent stop operations on the same ReplicaSet might have + // unintended side effects. + return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == rs.Spec.Replicas, nil + } +} + +// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count +// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count. 
+func JobHasDesiredParallelism(c BatchInterface, job *batch.Job) wait.ConditionFunc { + + return func() (bool, error) { + job, err := c.Jobs(job.Namespace).Get(job.Name) + if err != nil { + return false, err + } + + // desired parallelism can be either the exact number, in which case return immediately + if job.Status.Active == *job.Spec.Parallelism { + return true, nil + } + if job.Spec.Completions == nil { + // A job without specified completions needs to wait for Active to reach Parallelism. + return false, nil + } else { + // otherwise count successful + progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded + return progress == 0, nil + } + } +} + +// DeploymentHasDesiredReplicas returns a condition that will be true if and only if +// the desired replica count for a deployment equals its updated replicas count. +// (non-terminated pods that have the desired template spec). +func DeploymentHasDesiredReplicas(c ExtensionsInterface, deployment *extensions.Deployment) wait.ConditionFunc { + // If we're given a deployment where the status lags the spec, it either + // means that the deployment is stale, or that the deployment manager hasn't + // noticed the update yet. Polling status.Replicas is not safe in the latter + // case. + desiredGeneration := deployment.Generation + + return func() (bool, error) { + deployment, err := c.Deployments(deployment.Namespace).Get(deployment.Name) + if err != nil { + return false, err + } + return deployment.Status.ObservedGeneration >= desiredGeneration && + deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil + } +} + +// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that +// the pod has already reached completed state. +var ErrPodCompleted = fmt.Errorf("pod ran to completion") + +// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, +// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. +func PodRunning(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodRunning: + return true, nil + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + } + } + return false, nil +} + +// PodCompleted returns true if the pod has run to completion, false if the pod has not yet +// reached running state, or an error in any other case. +func PodCompleted(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodFailed, api.PodSucceeded: + return true, nil + } + } + return false, nil +} + +// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not +// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or +// an error in any other case. 
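The condition helpers above plug directly into the polling utilities in pkg/util/wait. A minimal sketch, with illustrative timings and package name:

package waitexample

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForRC polls until the replication controller has settled on its desired replica count.
func waitForRC(c client.Interface, rc *api.ReplicationController) error {
	// Re-check every two seconds, give up after one minute (illustrative timings).
	return wait.Poll(2*time.Second, 1*time.Minute, client.ControllerHasDesiredReplicas(c, rc))
}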
+func PodRunningAndReady(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + case api.PodRunning: + return api.IsPodReady(t), nil + } + } + return false, nil +} + +// PodNotPending returns true if the pod has left the pending state, false if it has not, +// or an error in any other case (such as if the pod was deleted). +func PodNotPending(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodPending: + return false, nil + default: + return true, nil + } + } + return false, nil +} + +// PodContainerRunning returns false until the named container has ContainerStatus running (at least once), +// and will return an error if the pod is deleted, runs to completion, or the container pod is not available. +func PodContainerRunning(containerName string) watch.ConditionFunc { + return func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") + } + switch t := event.Object.(type) { + case *api.Pod: + switch t.Status.Phase { + case api.PodRunning, api.PodPending: + case api.PodFailed, api.PodSucceeded: + return false, ErrPodCompleted + default: + return false, nil + } + for _, s := range t.Status.ContainerStatuses { + if s.Name != containerName { + continue + } + return s.State.Running != nil, nil + } + return false, nil + } + return false, nil + } +} + +// ServiceAccountHasSecrets returns true if the service account has at least one secret, +// false if it does not, or an error. +func ServiceAccountHasSecrets(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, errors.NewNotFound(unversioned.GroupResource{Resource: "serviceaccounts"}, "") + } + switch t := event.Object.(type) { + case *api.ServiceAccount: + return len(t.Secrets) > 0, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go new file mode 100644 index 00000000..60fffa75 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + ConfigMapResourceName string = "configmaps" +) + +type ConfigMapsNamespacer interface { + ConfigMaps(namespace string) ConfigMapsInterface +} + +type ConfigMapsInterface interface { + Get(string) (*api.ConfigMap, error) + List(opts api.ListOptions) (*api.ConfigMapList, error) + Create(*api.ConfigMap) (*api.ConfigMap, error) + Delete(string) error + Update(*api.ConfigMap) (*api.ConfigMap, error) + Watch(api.ListOptions) (watch.Interface, error) +} + +type ConfigMaps struct { + client *Client + namespace string +} + +// ConfigMaps should implement ConfigMapsInterface +var _ ConfigMapsInterface = &ConfigMaps{} + +func newConfigMaps(c *Client, ns string) *ConfigMaps { + return &ConfigMaps{ + client: c, + namespace: ns, + } +} + +func (c *ConfigMaps) Get(name string) (*api.ConfigMap, error) { + result := &api.ConfigMap{} + err := c.client.Get(). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + Name(name). + Do(). + Into(result) + + return result, err +} + +func (c *ConfigMaps) List(opts api.ListOptions) (*api.ConfigMapList, error) { + result := &api.ConfigMapList{} + err := c.client.Get(). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +func (c *ConfigMaps) Create(cfg *api.ConfigMap) (*api.ConfigMap, error) { + result := &api.ConfigMap{} + err := c.client.Post(). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + Body(cfg). + Do(). + Into(result) + + return result, err +} + +func (c *ConfigMaps) Delete(name string) error { + return c.client.Delete(). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + Name(name). + Do(). + Error() +} + +func (c *ConfigMaps) Update(cfg *api.ConfigMap) (*api.ConfigMap, error) { + result := &api.ConfigMap{} + + err := c.client.Put(). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + Name(cfg.Name). + Body(cfg). + Do(). + Into(result) + + return result, err +} + +func (c *ConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.namespace). + Resource(ConfigMapResourceName). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go new file mode 100644 index 00000000..30638685 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go @@ -0,0 +1,123 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "strconv" + + cadvisorapi "github.com/google/cadvisor/info/v1" +) + +type ContainerInfoGetter interface { + // GetContainerInfo returns information about a container. 
+ GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + // GetRootInfo returns information about the root container on a machine. + GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + // GetMachineInfo returns the machine's information like number of cores, memory capacity. + GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) +} + +type HTTPContainerInfoGetter struct { + Client *http.Client + Port int +} + +func (self *HTTPContainerInfoGetter) GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) { + request, err := http.NewRequest( + "GET", + fmt.Sprintf("http://%v/spec", + net.JoinHostPort(host, strconv.Itoa(self.Port)), + ), + nil, + ) + if err != nil { + return nil, err + } + + response, err := self.Client.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + return nil, fmt.Errorf("trying to get machine spec from %v; received status %v", + host, response.Status) + } + var minfo cadvisorapi.MachineInfo + err = json.NewDecoder(response.Body).Decode(&minfo) + if err != nil { + return nil, err + } + return &minfo, nil +} + +func (self *HTTPContainerInfoGetter) getContainerInfo(host, path string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + var body io.Reader + if req != nil { + content, err := json.Marshal(req) + if err != nil { + return nil, err + } + body = bytes.NewBuffer(content) + } + + request, err := http.NewRequest( + "GET", + fmt.Sprintf("http://%v/stats/%v", + net.JoinHostPort(host, strconv.Itoa(self.Port)), + path, + ), + body, + ) + if err != nil { + return nil, err + } + + response, err := self.Client.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + return nil, fmt.Errorf("trying to get info for %v from %v; received status %v", + path, host, response.Status) + } + var cinfo cadvisorapi.ContainerInfo + err = json.NewDecoder(response.Body).Decode(&cinfo) + if err != nil { + return nil, err + } + return &cinfo, nil +} + +func (self *HTTPContainerInfoGetter) GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return self.getContainerInfo( + host, + fmt.Sprintf("%v/%v", podID, containerID), + req, + ) +} + +func (self *HTTPContainerInfoGetter) GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return self.getContainerInfo(host, "", req) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go new file mode 100644 index 00000000..fa12591a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// DaemonSetsNamespacer has methods to work with DaemonSet resources in a namespace +type DaemonSetsNamespacer interface { + DaemonSets(namespace string) DaemonSetInterface +} + +type DaemonSetInterface interface { + List(opts api.ListOptions) (*extensions.DaemonSetList, error) + Get(name string) (*extensions.DaemonSet, error) + Create(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) + Update(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) + UpdateStatus(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// daemonSets implements DaemonSetsNamespacer interface +type daemonSets struct { + r *ExtensionsClient + ns string +} + +func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { + return &daemonSets{c, namespace} +} + +// Ensure statically that daemonSets implements DaemonSetInterface. +var _ DaemonSetInterface = &daemonSets{} + +func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { + result = &extensions.DaemonSetList{} + err = c.r.Get().Namespace(c.ns).Resource("daemonsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular daemon set. +func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.r.Get().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Into(result) + return +} + +// Create creates a new daemon set. +func (c *daemonSets) Create(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.r.Post().Namespace(c.ns).Resource("daemonsets").Body(daemon).Do().Into(result) + return +} + +// Update updates an existing daemon set. +func (c *daemonSets) Update(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).Body(daemon).Do().Into(result) + return +} + +// UpdateStatus updates an existing daemon set status +func (c *daemonSets) UpdateStatus(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { + result = &extensions.DaemonSet{} + err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).SubResource("status").Body(daemon).Do().Into(result) + return +} + +// Delete deletes an existing daemon set. +func (c *daemonSets) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested daemon sets. +func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go new file mode 100644 index 00000000..cafd4cfd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go @@ -0,0 +1,111 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// DeploymentsNamespacer has methods to work with Deployment resources in a namespace +type DeploymentsNamespacer interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + List(opts api.ListOptions) (*extensions.DeploymentList, error) + Get(name string) (*extensions.Deployment, error) + Delete(name string, options *api.DeleteOptions) error + Create(*extensions.Deployment) (*extensions.Deployment, error) + Update(*extensions.Deployment) (*extensions.Deployment, error) + UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Rollback(*extensions.DeploymentRollback) error +} + +// deployments implements DeploymentInterface +type deployments struct { + client *ExtensionsClient + ns string +} + +// Ensure statically that deployments implements DeploymentInterface. +var _ DeploymentInterface = &deployments{} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsClient, namespace string) *deployments { + return &deployments{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { + result = &extensions.DeploymentList{} + err = c.client.Get().Namespace(c.ns).Resource("deployments").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Get().Namespace(c.ns).Resource("deployments").Name(name).Do().Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("deployments").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Post().Namespace(c.ns).Resource("deployments").Body(deployment).Do().Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
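A minimal sketch of driving the typed deployments client, using the Rollback method defined later in this file; the package name, namespace, deployment name, and revision are illustrative assumptions.

package deployexample

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// rollbackDeployment asks the server to roll the named deployment back to the given revision.
func rollbackDeployment(c client.ExtensionsInterface, namespace, name string, revision int64) error {
	return c.Deployments(namespace).Rollback(&extensions.DeploymentRollback{
		Name:       name,
		RollbackTo: extensions.RollbackConfig{Revision: revision},
	})
}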
+func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).Body(deployment).Do().Into(result) + return +} + +func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { + result = &extensions.Deployment{} + err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).SubResource("status").Body(deployment).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Rollback applies the provided DeploymentRollback to the named deployment in the current namespace. +func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go new file mode 100644 index 00000000..252d8097 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package unversioned contains the implementation of the client side communication with the +Kubernetes master. The Client class provides methods for reading, creating, updating, +and deleting pods, replication controllers, daemons, services, and nodes. + +Most consumers should use the Config object to create a Client: + + import ( + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/api" + ) + + [...] + + config := &client.Config{ + Host: "http://localhost:8080", + Username: "test", + Password: "password", + } + client, err := client.New(config) + if err != nil { + // handle error + } + pods, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}) + if err != nil { + // handle error + } + +More advanced consumers may wish to provide their own transport via a http.RoundTripper: + + config := &client.Config{ + Host: "https://localhost:8080", + Transport: oauthclient.Transport(), + } + client, err := client.New(config) + +The RESTClient type implements the Kubernetes API conventions (see `docs/devel/api-conventions.md`) +for a given API path and is intended for use by consumers implementing their own Kubernetes +compatible APIs.
+*/ +package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go new file mode 100644 index 00000000..c58c88a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// EndpointsNamespacer has methods to work with Endpoints resources in a namespace +type EndpointsNamespacer interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources +type EndpointsInterface interface { + Create(endpoints *api.Endpoints) (*api.Endpoints, error) + List(opts api.ListOptions) (*api.EndpointsList, error) + Get(name string) (*api.Endpoints, error) + Delete(name string) error + Update(endpoints *api.Endpoints) (*api.Endpoints, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// endpoints implements EndpointsInterface +type endpoints struct { + r *Client + ns string +} + +// newEndpoints returns a endpoints +func newEndpoints(c *Client, namespace string) *endpoints { + return &endpoints{c, namespace} +} + +// Create creates a new endpoint. +func (c *endpoints) Create(endpoints *api.Endpoints) (*api.Endpoints, error) { + result := &api.Endpoints{} + err := c.r.Post().Namespace(c.ns).Resource("endpoints").Body(endpoints).Do().Into(result) + return result, err +} + +// List takes a selector, and returns the list of endpoints that match that selector +func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { + result = &api.EndpointsList{} + err = c.r.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Get returns information about the endpoints for a particular service. +func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { + result = &api.Endpoints{} + err = c.r.Get().Namespace(c.ns).Resource("endpoints").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the endpoint, and returns an error if one occurs +func (c *endpoints) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("endpoints").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested endpoints for a service. +func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +func (c *endpoints) Update(endpoints *api.Endpoints) (*api.Endpoints, error) { + result := &api.Endpoints{} + err := c.r.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). 
+ Into(result) + return result, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go new file mode 100644 index 00000000..b882ccdc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go @@ -0,0 +1,219 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// EventNamespacer can return an EventInterface for the given namespace. +type EventNamespacer interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources +type EventInterface interface { + Create(event *api.Event) (*api.Event, error) + Update(event *api.Event) (*api.Event, error) + Patch(event *api.Event, data []byte) (*api.Event, error) + List(opts api.ListOptions) (*api.EventList, error) + Get(name string) (*api.Event, error) + Watch(opts api.ListOptions) (watch.Interface, error) + // Search finds events about the specified object + Search(objOrRef runtime.Object) (*api.EventList, error) + Delete(name string) error + // DeleteCollection deletes a collection of events. + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// events implements Events interface +type events struct { + client *Client + namespace string +} + +// newEvents returns a new events object. +func newEvents(c *Client, ns string) *events { + return &events{ + client: c, + namespace: ns, + } +} + +// Create makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) Create(event *api.Event) (*api.Event, error) { + if e.namespace != "" && event.Namespace != e.namespace { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.namespace) + } + result := &api.Event{} + err := e.client.Post(). + Namespace(event.Namespace). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// Update modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. 
Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) Update(event *api.Event) (*api.Event, error) { + result := &api.Event{} + err := e.client.Put(). + Namespace(event.Namespace). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// Patch modifies an existing event. It returns the copy of the event that the server returns, or an +// error. The namespace and name of the target event is deduced from the incompleteEvent. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) { + result := &api.Event{} + err := e.client.Patch(api.StrategicMergePatchType). + Namespace(incompleteEvent.Namespace). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// List returns a list of events matching the selectors. +func (e *events) List(opts api.ListOptions) (*api.EventList, error) { + result := &api.EventList{} + err := e.client.Get(). + Namespace(e.namespace). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return result, err +} + +// Get returns the given event, or an error. +func (e *events) Get(name string) (*api.Event, error) { + result := &api.Event{} + err := e.client.Get(). + Namespace(e.namespace). + Resource("events"). + Name(name). + Do(). + Into(result) + return result, err +} + +// Watch starts watching for events matching the given selectors. +func (e *events) Watch(opts api.ListOptions) (watch.Interface, error) { + return e.client.Get(). + Prefix("watch"). + Namespace(e.namespace). + Resource("events"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. +func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { + ref, err := api.GetReference(objOrRef) + if err != nil { + return nil, err + } + if e.namespace != "" && ref.Namespace != e.namespace { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.namespace) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(api.ListOptions{FieldSelector: fieldSelector}) +} + +// Delete deletes an existing event. +func (e *events) Delete(name string) error { + return e.client.Delete(). + Namespace(e.namespace). + Resource("events"). + Name(name). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (e *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return e.client.Delete(). + Namespace(e.namespace). + Resource("events"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. 
+func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go new file mode 100644 index 00000000..3c9114d9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go @@ -0,0 +1,138 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// Interface holds the experimental methods for clients of Kubernetes +// to allow mock testing. +// Features of Extensions group are not supported and may be changed or removed in +// incompatible ways at any time. +type ExtensionsInterface interface { + ScaleNamespacer + DaemonSetsNamespacer + DeploymentsNamespacer + JobsNamespacer + IngressNamespacer + NetworkPolicyNamespacer + ThirdPartyResourceNamespacer + ReplicaSetsNamespacer + PodSecurityPoliciesInterface +} + +// ExtensionsClient is used to interact with experimental Kubernetes features. +// Features of Extensions group are not supported and may be changed or removed in +// incompatible ways at any time. 
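Illustrative usage sketch (not part of the vendored sources): how the events client in events.go above might be used to record an event about a pod and then find it again via Search, which builds the field selector internally. The *Client value "c" and the pod are assumed to come from elsewhere; a construction sketch follows the helper.go hunk below. The package and function names here are hypothetical.

// Hypothetical helper, for illustration only.
package clientexamples

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func recordAndFindEvents(c *client.Client, pod *api.Pod) error {
	events := c.Events(pod.Namespace)

	// The event's namespace must match the namespace this events client was created with.
	_, err := events.Create(&api.Event{
		ObjectMeta: api.ObjectMeta{
			GenerateName: pod.Name + "-",
			Namespace:    pod.Namespace,
		},
		InvolvedObject: api.ObjectReference{
			Kind:      "Pod",
			Namespace: pod.Namespace,
			Name:      pod.Name,
			UID:       pod.UID,
		},
		Reason:  "Example",
		Message: "event created by an illustrative usage sketch",
	})
	if err != nil {
		return err
	}

	// Search derives an ObjectReference from the pod and lists matching events.
	list, err := events.Search(pod)
	if err != nil {
		return err
	}
	fmt.Printf("found %d events for %s/%s\n", len(list.Items), pod.Namespace, pod.Name)
	return nil
}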
+type ExtensionsClient struct { + *restclient.RESTClient +} + +func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicy(c) +} + +func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsClient) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +func (c *ExtensionsClient) Ingress(namespace string) IngressInterface { + return newIngress(c, namespace) +} + +func (c *ExtensionsClient) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + +func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +// NewExtensions creates a new ExtensionsClient for the given config. This client +// provides access to experimental Kubernetes features. +// Features of Extensions group are not supported and may be changed or removed in +// incompatible ways at any time. +func NewExtensions(c *restclient.Config) (*ExtensionsClient, error) { + config := *c + if err := setExtensionsDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsClient{client}, nil +} + +// NewExtensionsOrDie creates a new ExtensionsClient for the given config and +// panics if there is an error in the config. +// Features of Extensions group are not supported and may be changed or removed in +// incompatible ways at any time. +func NewExtensionsOrDie(c *restclient.Config) *ExtensionsClient { + client, err := NewExtensions(c) + if err != nil { + panic(err) + } + return client +} + +func setExtensionsDefaults(config *restclient.Config) error { + // if experimental group is not registered, return an error + g, err := registered.Group(extensions.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go new file mode 100644 index 00000000..9fc540cf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go @@ -0,0 +1,31 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "time" +) + +// FlagSet abstracts the flag interface for compatibility with both Golang "flag" +// and cobra pflags (Posix style). +type FlagSet interface { + StringVar(p *string, name, value, usage string) + BoolVar(p *bool, name string, value bool, usage string) + UintVar(p *uint, name string, value uint, usage string) + DurationVar(p *time.Duration, name string, value time.Duration, usage string) + IntVar(p *int, name string, value int, usage string) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go new file mode 100644 index 00000000..e664125f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go @@ -0,0 +1,267 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/version" + // Import solely to initialize client auth plugins. + _ "k8s.io/kubernetes/plugin/pkg/client/auth" +) + +const ( + legacyAPIPath = "/api" + defaultAPIPath = "/apis" +) + +// New creates a Kubernetes client for the given config. This client works with pods, +// replication controllers, daemons, and services. It allows operations such as list, get, update +// and delete on these objects. An error is returned if the provided configuration +// is not valid. 
+func New(c *restclient.Config) (*Client, error) { + config := *c + if err := SetKubernetesDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + + discoveryConfig := *c + discoveryClient, err := discovery.NewDiscoveryClientForConfig(&discoveryConfig) + if err != nil { + return nil, err + } + + var autoscalingClient *AutoscalingClient + if registered.IsRegistered(autoscaling.GroupName) { + autoscalingConfig := *c + autoscalingClient, err = NewAutoscaling(&autoscalingConfig) + if err != nil { + return nil, err + } + } + + var batchClient *BatchClient + if registered.IsRegistered(batch.GroupName) { + batchConfig := *c + batchClient, err = NewBatch(&batchConfig) + if err != nil { + return nil, err + } + } + + var extensionsClient *ExtensionsClient + if registered.IsRegistered(extensions.GroupName) { + extensionsConfig := *c + extensionsClient, err = NewExtensions(&extensionsConfig) + if err != nil { + return nil, err + } + } + var policyClient *PolicyClient + if registered.IsRegistered(policy.GroupName) { + policyConfig := *c + policyClient, err = NewPolicy(&policyConfig) + if err != nil { + return nil, err + } + } + + var appsClient *AppsClient + if registered.IsRegistered(apps.GroupName) { + appsConfig := *c + appsClient, err = NewApps(&appsConfig) + if err != nil { + return nil, err + } + } + + var rbacClient *RbacClient + if registered.IsRegistered(rbac.GroupName) { + rbacConfig := *c + rbacClient, err = NewRbac(&rbacConfig) + if err != nil { + return nil, err + } + } + + return &Client{RESTClient: client, AutoscalingClient: autoscalingClient, BatchClient: batchClient, ExtensionsClient: extensionsClient, DiscoveryClient: discoveryClient, AppsClient: appsClient, PolicyClient: policyClient, RbacClient: rbacClient}, nil +} + +// MatchesServerVersion queries the server to compares the build version +// (git hash) of the client with the server's build version. It returns an error +// if it failed to contact the server or if the versions are not an exact match. +func MatchesServerVersion(client *Client, c *restclient.Config) error { + var err error + if client == nil { + client, err = New(c) + if err != nil { + return err + } + } + cVer := version.Get() + sVer, err := client.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("couldn't read version from server: %v\n", err) + } + // GitVersion includes GitCommit and GitTreeState, but best to be safe? + if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != cVer.GitTreeState { + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, cVer) + } + + return nil +} + +// NegotiateVersion queries the server's supported api versions to find +// a version that both client and server support. +// - If no version is provided, try registered client versions in order of +// preference. +// - If version is provided, but not default config (explicitly requested via +// commandline flag), and is unsupported by the server, print a warning to +// stderr and try client's registered versions in order of preference. +// - If version is config default, and the server does not support it, +// return an error. 
+func NegotiateVersion(client *Client, c *restclient.Config, requestedGV *unversioned.GroupVersion, clientRegisteredGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) { + var err error + if client == nil { + client, err = New(c) + if err != nil { + return nil, err + } + } + clientVersions := sets.String{} + for _, gv := range clientRegisteredGVs { + clientVersions.Insert(gv.String()) + } + groups, err := client.ServerGroups() + if err != nil { + // This is almost always a connection error, and higher level code should treat this as a generic error, + // not a negotiation specific error. + return nil, err + } + versions := unversioned.ExtractGroupVersions(groups) + serverVersions := sets.String{} + for _, v := range versions { + serverVersions.Insert(v) + } + + // If no version requested, use config version (may also be empty). + // make a copy of the original so we don't risk mutating input here or in the returned value + var preferredGV *unversioned.GroupVersion + switch { + case requestedGV != nil: + t := *requestedGV + preferredGV = &t + case c.GroupVersion != nil: + t := *c.GroupVersion + preferredGV = &t + } + + // If version explicitly requested verify that both client and server support it. + // If server does not support warn, but try to negotiate a lower version. + if preferredGV != nil { + if !clientVersions.Has(preferredGV.String()) { + return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", preferredGV, clientVersions) + + } + // If the server supports no versions, then we should just use the preferredGV + // This can happen because discovery fails due to 403 Forbidden errors + if len(serverVersions) == 0 { + return preferredGV, nil + } + if serverVersions.Has(preferredGV.String()) { + return preferredGV, nil + } + // If we are using an explicit config version the server does not support, fail. + if (c.GroupVersion != nil) && (*preferredGV == *c.GroupVersion) { + return nil, fmt.Errorf("server does not support API version %q", preferredGV) + } + } + + for _, clientGV := range clientRegisteredGVs { + if serverVersions.Has(clientGV.String()) { + // Version was not explicitly requested in command config (--api-version). + // Ok to fall back to a supported version with a warning. + // TODO: caesarxuchao: enable the warning message when we have + // proper fix. Please refer to issue #14895. + // if len(version) != 0 { + // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion) + // } + t := clientGV + return &t, nil + } + } + return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v", + serverVersions, clientVersions) +} + +// NewOrDie creates a Kubernetes client and panics if the provided API version is not recognized. +func NewOrDie(c *restclient.Config) *Client { + client, err := New(c) + if err != nil { + panic(err) + } + return client +} + +// NewInCluster is a shortcut for calling InClusterConfig() and then New(). +func NewInCluster() (*Client, error) { + cc, err := restclient.InClusterConfig() + if err != nil { + return nil, err + } + return New(cc) +} + +// SetKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. 
+// TODO: this method needs to be split into one that sets defaults per group, expected to be fix in PR "Refactoring clientcache.go and helper.go #14592" +func SetKubernetesDefaults(config *restclient.Config) error { + if config.APIPath == "" { + config.APIPath = legacyAPIPath + } + g, err := registered.Group(api.GroupName) + if err != nil { + return err + } + // TODO: Unconditionally set the config.Version, until we fix the config. + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = api.Codecs + } + if config.Codec == nil { + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + } + + return restclient.SetKubernetesDefaults(config) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go new file mode 100644 index 00000000..8cdba3a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go @@ -0,0 +1,103 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/watch" +) + +// HorizontalPodAutoscalersNamespacer has methods to work with HorizontalPodAutoscaler resources in a namespace +type HorizontalPodAutoscalersNamespacer interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) + Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) + Delete(name string, options *api.DeleteOptions) error + Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface using AutoscalingClient internally +type horizontalPodAutoscalers struct { + client *AutoscalingClient + ns string +} + +// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors. 
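Illustrative usage sketch (not part of the vendored sources): constructing the composite Client with New from helper.go above and issuing a simple list request with the endpoints client from endpoints.go. The API server address is a placeholder assumption.

// Minimal, self-contained sketch; the Host value below is a placeholder.
package main

import (
	"fmt"
	"log"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// New copies the config, applies SetKubernetesDefaults, and also builds the
	// per-group clients (extensions, batch, autoscaling, ...) for registered groups.
	c, err := client.New(&restclient.Config{Host: "http://127.0.0.1:8080"})
	if err != nil {
		log.Fatalf("could not build client: %v", err)
	}

	// List Endpoints objects in the "default" namespace.
	eps, err := c.Endpoints(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		log.Fatalf("could not list endpoints: %v", err)
	}
	fmt.Printf("found %d Endpoints objects\n", len(eps.Items))
}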
+func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { + result = &autoscaling.HorizontalPodAutoscalerList{} + err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs +func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result) + return +} + +// UpdateStatus takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { + result = &autoscaling.HorizontalPodAutoscaler{} + err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).SubResource("status").Body(horizontalPodAutoscaler).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("horizontalPodAutoscalers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go new file mode 100644 index 00000000..bbe61472 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go @@ -0,0 +1,40 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +// These imports are the API groups the client will support. +import ( + "fmt" + + _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/apimachinery/registered" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" +) + +func init() { + if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go new file mode 100644 index 00000000..4865b208 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// IngressNamespacer has methods to work with Ingress resources in a namespace +type IngressNamespacer interface { + Ingress(namespace string) IngressInterface +} + +// IngressInterface exposes methods to work on Ingress resources. +type IngressInterface interface { + List(opts api.ListOptions) (*extensions.IngressList, error) + Get(name string) (*extensions.Ingress, error) + Create(ingress *extensions.Ingress) (*extensions.Ingress, error) + Update(ingress *extensions.Ingress) (*extensions.Ingress, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(ingress *extensions.Ingress) (*extensions.Ingress, error) +} + +// ingress implements IngressNamespacer interface +type ingress struct { + r *ExtensionsClient + ns string +} + +// newIngress returns a ingress +func newIngress(c *ExtensionsClient, namespace string) *ingress { + return &ingress{c, namespace} +} + +// List returns a list of ingress that match the label and field selectors. 
+func (c *ingress) List(opts api.ListOptions) (result *extensions.IngressList, err error) { + result = &extensions.IngressList{} + err = c.r.Get().Namespace(c.ns).Resource("ingresses").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular ingress. +func (c *ingress) Get(name string) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.r.Get().Namespace(c.ns).Resource("ingresses").Name(name).Do().Into(result) + return +} + +// Create creates a new ingress. +func (c *ingress) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.r.Post().Namespace(c.ns).Resource("ingresses").Body(ingress).Do().Into(result) + return +} + +// Update updates an existing ingress. +func (c *ingress) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).Body(ingress).Do().Into(result) + return +} + +// Delete deletes a ingress, returns error if one occurs. +func (c *ingress) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("ingresses").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested ingress. +func (c *ingress) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the ingress and the new status. Returns the server's representation of the ingress, and an error, if it occurs. +func (c *ingress) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { + result = &extensions.Ingress{} + err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).SubResource("status").Body(ingress).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go new file mode 100644 index 00000000..94b81907 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go @@ -0,0 +1,167 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/watch" +) + +// JobsNamespacer has methods to work with Job resources in a namespace +type JobsNamespacer interface { + Jobs(namespace string) JobInterface +} + +// JobInterface exposes methods to work on Job resources. 
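Illustrative usage sketch (not part of the vendored sources): creating a minimal Ingress through the ingress client in ingress.go above, reached via the extensions group client. The service name, port, and object name are placeholders; the extensions.Ingress type is assumed to come from k8s.io/kubernetes/pkg/apis/extensions as imported above.

// Hypothetical helper, for illustration only.
package clientexamples

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func createDefaultBackendIngress(c *client.Client, namespace string) (*extensions.Ingress, error) {
	ing := &extensions.Ingress{
		ObjectMeta: api.ObjectMeta{Name: "example-ingress"},
		Spec: extensions.IngressSpec{
			// Route all traffic to a single backend service (placeholder name/port).
			Backend: &extensions.IngressBackend{
				ServiceName: "example-service",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
	return c.ExtensionsClient.Ingress(namespace).Create(ing)
}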
+type JobInterface interface { + List(opts api.ListOptions) (*batch.JobList, error) + Get(name string) (*batch.Job, error) + Create(job *batch.Job) (*batch.Job, error) + Update(job *batch.Job) (*batch.Job, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(job *batch.Job) (*batch.Job, error) +} + +// jobs implements JobsNamespacer interface +type jobs struct { + r *ExtensionsClient + ns string +} + +// newJobs returns a jobs +func newJobs(c *ExtensionsClient, namespace string) *jobs { + return &jobs{c, namespace} +} + +// Ensure statically that jobs implements JobInterface. +var _ JobInterface = &jobs{} + +// List returns a list of jobs that match the label and field selectors. +func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} + err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular job. +func (c *jobs) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) + return +} + +// Create creates a new job. +func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) + return +} + +// Update updates an existing job. +func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) + return +} + +// Delete deletes a job, returns error if one occurs. +func (c *jobs) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. +func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) + return +} + +// jobsV1 implements JobsNamespacer interface using BatchClient internally +type jobsV1 struct { + r *BatchClient + ns string +} + +// newJobsV1 returns a jobsV1 +func newJobsV1(c *BatchClient, namespace string) *jobsV1 { + return &jobsV1{c, namespace} +} + +// Ensure statically that jobsV1 implements JobInterface. +var _ JobInterface = &jobsV1{} + +// List returns a list of jobs that match the label and field selectors. +func (c *jobsV1) List(opts api.ListOptions) (result *batch.JobList, err error) { + result = &batch.JobList{} + err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular job. +func (c *jobsV1) Get(name string) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) + return +} + +// Create creates a new job. 
+func (c *jobsV1) Create(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) + return +} + +// Update updates an existing job. +func (c *jobsV1) Update(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) + return +} + +// Delete deletes a job, returns error if one occurs. +func (c *jobsV1) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobsV1) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. +func (c *jobsV1) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { + result = &batch.Job{} + err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go new file mode 100644 index 00000000..8bc2253d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// LimitRangesNamespacer has methods to work with LimitRange resources in a namespace +type LimitRangesNamespacer interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + List(opts api.ListOptions) (*api.LimitRangeList, error) + Get(name string) (*api.LimitRange, error) + Delete(name string) error + Create(limitRange *api.LimitRange) (*api.LimitRange, error) + Update(limitRange *api.LimitRange) (*api.LimitRange, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// limitRanges implements LimitRangesNamespacer interface +type limitRanges struct { + r *Client + ns string +} + +// newLimitRanges returns a limitRanges +func newLimitRanges(c *Client, namespace string) *limitRanges { + return &limitRanges{ + r: c, + ns: namespace, + } +} + +// List takes a selector, and returns the list of limitRanges that match that selector. 
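Illustrative usage sketch (not part of the vendored sources): listing Jobs with the jobs client in jobs.go above. "c" is a *Client as in the helper.go sketch; the package and function names are hypothetical.

// Hypothetical helper, for illustration only.
package clientexamples

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func printJobCompletions(c *client.Client, namespace string) error {
	// ExtensionsClient is nil when the extensions group is not registered (see New above).
	jobs := c.ExtensionsClient.Jobs(namespace)

	list, err := jobs.List(api.ListOptions{})
	if err != nil {
		return err
	}
	for i := range list.Items {
		job := &list.Items[i]
		fmt.Printf("job %s: %d succeeded, %d failed\n", job.Name, job.Status.Succeeded, job.Status.Failed)
	}
	return nil
}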
+func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { + result = &api.LimitRangeList{} + err = c.r.Get().Namespace(c.ns).Resource("limitRanges").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the limitRange, and returns the corresponding Pod object, and an error if it occurs +func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.r.Get().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the limitRange, and returns an error if one occurs +func (c *limitRanges) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Error() +} + +// Create takes the representation of a limitRange. Returns the server's representation of the limitRange, and an error, if it occurs. +func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.r.Post().Namespace(c.ns).Resource("limitRanges").Body(limitRange).Do().Into(result) + return +} + +// Update takes the representation of a limitRange to update. Returns the server's representation of the limitRange, and an error, if it occurs. +func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { + result = &api.LimitRange{} + err = c.r.Put().Namespace(c.ns).Resource("limitRanges").Name(limitRange.Name).Body(limitRange).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resource +func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("limitRanges"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go new file mode 100644 index 00000000..122bcba5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type NamespacesInterface interface { + Namespaces() NamespaceInterface +} + +type NamespaceInterface interface { + Create(item *api.Namespace) (*api.Namespace, error) + Get(name string) (result *api.Namespace, err error) + List(opts api.ListOptions) (*api.NamespaceList, error) + Delete(name string) error + Update(item *api.Namespace) (*api.Namespace, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Finalize(item *api.Namespace) (*api.Namespace, error) + Status(item *api.Namespace) (*api.Namespace, error) +} + +// namespaces implements NamespacesInterface +type namespaces struct { + r *Client +} + +// newNamespaces returns a namespaces object. 
+func newNamespaces(c *Client) *namespaces { + return &namespaces{r: c} +} + +// Create creates a new namespace. +func (c *namespaces) Create(namespace *api.Namespace) (*api.Namespace, error) { + result := &api.Namespace{} + err := c.r.Post().Resource("namespaces").Body(namespace).Do().Into(result) + return result, err +} + +// List lists all the namespaces in the cluster. +func (c *namespaces) List(opts api.ListOptions) (*api.NamespaceList, error) { + result := &api.NamespaceList{} + err := c.r.Get(). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Do().Into(result) + return result, err +} + +// Update takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + err = c.r.Put().Resource("namespaces").Name(namespace.Name).Body(namespace).Do().Into(result) + return +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + if len(namespace.ResourceVersion) == 0 { + err = fmt.Errorf("invalid update object, missing resource version: %v", namespace) + return + } + err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} + +// Status takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Status(namespace *api.Namespace) (result *api.Namespace, err error) { + result = &api.Namespace{} + if len(namespace.ResourceVersion) == 0 { + err = fmt.Errorf("invalid update object, missing resource version: %v", namespace) + return + } + err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("status").Body(namespace).Do().Into(result) + return +} + +// Get gets an existing namespace +func (c *namespaces) Get(name string) (*api.Namespace, error) { + result := &api.Namespace{} + err := c.r.Get().Resource("namespaces").Name(name).Do().Into(result) + return result, err +} + +// Delete deletes an existing namespace. +func (c *namespaces) Delete(name string) error { + return c.r.Delete().Resource("namespaces").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Resource("namespaces"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go new file mode 100644 index 00000000..0dc9d97b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// NetworkPolicyNamespacer has methods to work with NetworkPolicy resources in a namespace +type NetworkPolicyNamespacer interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface exposes methods to work on NetworkPolicy resources. +type NetworkPolicyInterface interface { + List(opts api.ListOptions) (*extensions.NetworkPolicyList, error) + Get(name string) (*extensions.NetworkPolicy, error) + Create(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) + Update(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// NetworkPolicies implements NetworkPolicyNamespacer interface +type NetworkPolicies struct { + r *ExtensionsClient + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *ExtensionsClient, namespace string) *NetworkPolicies { + return &NetworkPolicies{c, namespace} +} + +// List returns a list of networkPolicy that match the label and field selectors. +func (c *NetworkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) { + result = &extensions.NetworkPolicyList{} + err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular networkPolicy. +func (c *NetworkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").Name(name).Do().Into(result) + return +} + +// Create creates a new networkPolicy. +func (c *NetworkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Post().Namespace(c.ns).Resource("networkpolicies").Body(networkPolicy).Do().Into(result) + return +} + +// Update updates an existing networkPolicy. +func (c *NetworkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { + result = &extensions.NetworkPolicy{} + err = c.r.Put().Namespace(c.ns).Resource("networkpolicies").Name(networkPolicy.Name).Body(networkPolicy).Do().Into(result) + return +} + +// Delete deletes a networkPolicy, returns error if one occurs. +func (c *NetworkPolicies) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("networkpolicies").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested networkPolicy. +func (c *NetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go new file mode 100644 index 00000000..452a03f1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go @@ -0,0 +1,100 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type NodesInterface interface { + Nodes() NodeInterface +} + +type NodeInterface interface { + Get(name string) (result *api.Node, err error) + Create(node *api.Node) (*api.Node, error) + List(opts api.ListOptions) (*api.NodeList, error) + Delete(name string) error + Update(*api.Node) (*api.Node, error) + UpdateStatus(*api.Node) (*api.Node, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// nodes implements NodesInterface +type nodes struct { + r *Client +} + +// newNodes returns a nodes object. +func newNodes(c *Client) *nodes { + return &nodes{c} +} + +// resourceName returns node's URL resource name. +func (c *nodes) resourceName() string { + return "nodes" +} + +// Create creates a new node. +func (c *nodes) Create(node *api.Node) (*api.Node, error) { + result := &api.Node{} + err := c.r.Post().Resource(c.resourceName()).Body(node).Do().Into(result) + return result, err +} + +// List takes a selector, and returns the list of nodes that match that selector in the cluster. +func (c *nodes) List(opts api.ListOptions) (*api.NodeList, error) { + result := &api.NodeList{} + err := c.r.Get().Resource(c.resourceName()).VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return result, err +} + +// Get gets an existing node. +func (c *nodes) Get(name string) (*api.Node, error) { + result := &api.Node{} + err := c.r.Get().Resource(c.resourceName()).Name(name).Do().Into(result) + return result, err +} + +// Delete deletes an existing node. +func (c *nodes) Delete(name string) error { + return c.r.Delete().Resource(c.resourceName()).Name(name).Do().Error() +} + +// Update updates an existing node. +func (c *nodes) Update(node *api.Node) (*api.Node, error) { + result := &api.Node{} + err := c.r.Put().Resource(c.resourceName()).Name(node.Name).Body(node).Do().Into(result) + return result, err +} + +func (c *nodes) UpdateStatus(node *api.Node) (*api.Node, error) { + result := &api.Node{} + err := c.r.Put().Resource(c.resourceName()).Name(node.Name).SubResource("status").Body(node).Do().Into(result) + return result, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(api.NamespaceAll). + Resource(c.resourceName()). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go new file mode 100644 index 00000000..bf5447d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// PersistentVolumeClaimsNamespacer has methods to work with PersistentVolumeClaim resources in a namespace +type PersistentVolumeClaimsNamespacer interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. +type PersistentVolumeClaimInterface interface { + List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error) + Get(name string) (*api.PersistentVolumeClaim, error) + Create(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + Update(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + UpdateStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// persistentVolumeClaims implements PersistentVolumeClaimsNamespacer interface +type persistentVolumeClaims struct { + client *Client + namespace string +} + +// newPersistentVolumeClaims returns a PodsClient +func newPersistentVolumeClaims(c *Client, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{c, namespace} +} + +func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { + result = &api.PersistentVolumeClaimList{} + + err = c.client.Get(). + Namespace(c.namespace). + Resource("persistentVolumeClaims"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Get().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Into(result) + return +} + +func (c *persistentVolumeClaims) Create(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Post().Namespace(c.namespace).Resource("persistentVolumeClaims").Body(claim).Do().Into(result) + return +} + +func (c *persistentVolumeClaims) Update(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).Body(claim).Do().Into(result) + return +} + +func (c *persistentVolumeClaims) UpdateStatus(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { + result = &api.PersistentVolumeClaim{} + err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).SubResource("status").Body(claim).Do().Into(result) + return +} + +func (c *persistentVolumeClaims) Delete(name string) error { + return c.client.Delete().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Error() +} + +func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). 
+ Namespace(c.namespace). + Resource("persistentVolumeClaims"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go new file mode 100644 index 00000000..2de17bb7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go @@ -0,0 +1,93 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type PersistentVolumesInterface interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + List(opts api.ListOptions) (*api.PersistentVolumeList, error) + Get(name string) (*api.PersistentVolume, error) + Create(volume *api.PersistentVolume) (*api.PersistentVolume, error) + Update(volume *api.PersistentVolume) (*api.PersistentVolume, error) + UpdateStatus(persistentVolume *api.PersistentVolume) (*api.PersistentVolume, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// persistentVolumes implements PersistentVolumesInterface +type persistentVolumes struct { + client *Client +} + +func newPersistentVolumes(c *Client) *persistentVolumes { + return &persistentVolumes{c} +} + +func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { + result = &api.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentVolumes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). 
+ Into(result) + + return result, err +} + +func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Get().Resource("persistentVolumes").Name(name).Do().Into(result) + return +} + +func (c *persistentVolumes) Create(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Post().Resource("persistentVolumes").Body(volume).Do().Into(result) + return +} + +func (c *persistentVolumes) Update(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).Body(volume).Do().Into(result) + return +} + +func (c *persistentVolumes) UpdateStatus(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { + result = &api.PersistentVolume{} + err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).SubResource("status").Body(volume).Do().Into(result) + return +} + +func (c *persistentVolumes) Delete(name string) error { + return c.client.Delete().Resource("persistentVolumes").Name(name).Do().Error() +} + +func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("persistentVolumes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go new file mode 100644 index 00000000..71b1ea02 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/watch" +) + +// PetSetNamespacer has methods to work with PetSet resources in a namespace +type PetSetNamespacer interface { + PetSets(namespace string) PetSetInterface +} + +// PetSetInterface exposes methods to work on PetSet resources. +type PetSetInterface interface { + List(opts api.ListOptions) (*apps.PetSetList, error) + Get(name string) (*apps.PetSet, error) + Create(petSet *apps.PetSet) (*apps.PetSet, error) + Update(petSet *apps.PetSet) (*apps.PetSet, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(petSet *apps.PetSet) (*apps.PetSet, error) +} + +// petSet implements PetSetNamespacer interface +type petSet struct { + r *AppsClient + ns string +} + +// newPetSet returns a petSet +func newPetSet(c *AppsClient, namespace string) *petSet { + return &petSet{c, namespace} +} + +// List returns a list of petSet that match the label and field selectors. 
+func (c *petSet) List(opts api.ListOptions) (result *apps.PetSetList, err error) { + result = &apps.PetSetList{} + err = c.r.Get().Namespace(c.ns).Resource("petsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular petSet. +func (c *petSet) Get(name string) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Get().Namespace(c.ns).Resource("petsets").Name(name).Do().Into(result) + return +} + +// Create creates a new petSet. +func (c *petSet) Create(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Post().Namespace(c.ns).Resource("petsets").Body(petSet).Do().Into(result) + return +} + +// Update updates an existing petSet. +func (c *petSet) Update(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).Body(petSet).Do().Into(result) + return +} + +// Delete deletes a petSet, returns error if one occurs. +func (c *petSet) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("petsets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested petSet. +func (c *petSet) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("petsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the petSet and the new status. Returns the server's representation of the petSet, and an error, if it occurs. +func (c *petSet) UpdateStatus(petSet *apps.PetSet) (result *apps.PetSet, err error) { + result = &apps.PetSet{} + err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).SubResource("status").Body(petSet).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go new file mode 100644 index 00000000..14f373f3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/watch" +) + +// PodDisruptionBudgetNamespacer has methods to work with PodDisruptionBudget resources in a namespace +type PodDisruptionBudgetNamespacer interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface exposes methods to work on PodDisruptionBudget resources. 
+type PodDisruptionBudgetInterface interface { + List(opts api.ListOptions) (*policy.PodDisruptionBudgetList, error) + Get(name string) (*policy.PodDisruptionBudget, error) + Create(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) + Update(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) +} + +// podDisruptionBudget implements PodDisruptionBudgetNamespacer interface +type podDisruptionBudget struct { + r *PolicyClient + ns string +} + +// newPodDisruptionBudget returns a podDisruptionBudget +func newPodDisruptionBudget(c *PolicyClient, namespace string) *podDisruptionBudget { + return &podDisruptionBudget{c, namespace} +} + +// List returns a list of podDisruptionBudget that match the label and field selectors. +func (c *podDisruptionBudget) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { + result = &policy.PodDisruptionBudgetList{} + err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular podDisruptionBudget. +func (c *podDisruptionBudget) Get(name string) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Do().Into(result) + return +} + +// Create creates a new podDisruptionBudget. +func (c *podDisruptionBudget) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Post().Namespace(c.ns).Resource("poddisruptionbudgets").Body(podDisruptionBudget).Do().Into(result) + return +} + +// Update updates an existing podDisruptionBudget. +func (c *podDisruptionBudget) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).Body(podDisruptionBudget).Do().Into(result) + return +} + +// Delete deletes a podDisruptionBudget, returns error if one occurs. +func (c *podDisruptionBudget) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudget. +func (c *podDisruptionBudget) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// UpdateStatus takes the name of the podDisruptionBudget and the new status. Returns the server's representation of the podDisruptionBudget, and an error, if it occurs. 
+func (c *podDisruptionBudget) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { + result = &policy.PodDisruptionBudget{} + err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).SubResource("status").Body(podDisruptionBudget).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go new file mode 100644 index 00000000..ed5b733c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go @@ -0,0 +1,94 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// PodTemplatesNamespacer has methods to work with PodTemplate resources in a namespace +type PodTemplatesNamespacer interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + List(opts api.ListOptions) (*api.PodTemplateList, error) + Get(name string) (*api.PodTemplate, error) + Delete(name string, options *api.DeleteOptions) error + Create(podTemplate *api.PodTemplate) (*api.PodTemplate, error) + Update(podTemplate *api.PodTemplate) (*api.PodTemplate, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// podTemplates implements PodTemplatesNamespacer interface +type podTemplates struct { + r *Client + ns string +} + +// newPodTemplates returns a podTemplates +func newPodTemplates(c *Client, namespace string) *podTemplates { + return &podTemplates{ + r: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of podTemplates that match those selectors. +func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { + result = &api.PodTemplateList{} + err = c.r.Get().Namespace(c.ns).Resource("podTemplates").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the podTemplate, and returns the corresponding PodTemplate object, and an error if it occurs +func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.r.Get().Namespace(c.ns).Resource("podTemplates").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the podTemplate, and returns an error if one occurs +func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { + return c.r.Delete().Namespace(c.ns).Resource("podTemplates").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a podTemplate. Returns the server's representation of the podTemplate, and an error, if it occurs. 
+func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.r.Post().Namespace(c.ns).Resource("podTemplates").Body(podTemplate).Do().Into(result) + return +} + +// Update takes the representation of a podTemplate to update. Returns the server's representation of the podTemplate, and an error, if it occurs. +func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { + result = &api.PodTemplate{} + err = c.r.Put().Namespace(c.ns).Resource("podTemplates").Name(podTemplate.Name).Body(podTemplate).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("podTemplates"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go new file mode 100644 index 00000000..426d3ee8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go @@ -0,0 +1,115 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/watch" +) + +// PodsNamespacer has methods to work with Pod resources in a namespace +type PodsNamespacer interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + List(opts api.ListOptions) (*api.PodList, error) + Get(name string) (*api.Pod, error) + Delete(name string, options *api.DeleteOptions) error + Create(pod *api.Pod) (*api.Pod, error) + Update(pod *api.Pod) (*api.Pod, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Bind(binding *api.Binding) error + UpdateStatus(pod *api.Pod) (*api.Pod, error) + GetLogs(name string, opts *api.PodLogOptions) *restclient.Request +} + +// pods implements PodsNamespacer interface +type pods struct { + r *Client + ns string +} + +// newPods returns a pods +func newPods(c *Client, namespace string) *pods { + return &pods{ + r: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of pods that match those selectors. 
+func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) { + result = &api.PodList{} + err = c.r.Get().Namespace(c.ns).Resource("pods").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the pod, and returns the corresponding Pod object, and an error if it occurs +func (c *pods) Get(name string) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.r.Get().Namespace(c.ns).Resource("pods").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the pod, and returns an error if one occurs +func (c *pods) Delete(name string, options *api.DeleteOptions) error { + return c.r.Delete().Namespace(c.ns).Resource("pods").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a pod. Returns the server's representation of the pod, and an error, if it occurs. +func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.r.Post().Namespace(c.ns).Resource("pods").Body(pod).Do().Into(result) + return +} + +// Update takes the representation of a pod to update. Returns the server's representation of the pod, and an error, if it occurs. +func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).Body(pod).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). +func (c *pods) Bind(binding *api.Binding) error { + return c.r.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +// UpdateStatus takes the name of the pod and the new status. Returns the server's representation of the pod, and an error, if it occurs. +func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { + result = &api.Pod{} + err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).SubResource("status").Body(pod).Do().Into(result) + return +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { + return c.r.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go new file mode 100644 index 00000000..356d913d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go @@ -0,0 +1,111 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +type PodSecurityPoliciesInterface interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +type PodSecurityPolicyInterface interface { + Get(name string) (result *extensions.PodSecurityPolicy, err error) + Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) + Delete(name string) error + Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// podSecurityPolicy implements PodSecurityPolicyInterface +type podSecurityPolicy struct { + client *ExtensionsClient +} + +// newPodSecurityPolicy returns a podSecurityPolicy object. +func newPodSecurityPolicy(c *ExtensionsClient) *podSecurityPolicy { + return &podSecurityPolicy{c} +} + +func (s *podSecurityPolicy) Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { + result := &extensions.PodSecurityPolicy{} + err := s.client.Post(). + Resource("podsecuritypolicies"). + Body(psp). + Do(). + Into(result) + + return result, err +} + +// List returns a list of PodSecurityPolicies matching the selectors. +func (s *podSecurityPolicy) List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) { + result := &extensions.PodSecurityPolicyList{} + + err := s.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +// Get returns the given PodSecurityPolicy, or an error. +func (s *podSecurityPolicy) Get(name string) (*extensions.PodSecurityPolicy, error) { + result := &extensions.PodSecurityPolicy{} + err := s.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + Do(). + Into(result) + + return result, err +} + +// Watch starts watching for PodSecurityPolicies matching the given selectors. +func (s *podSecurityPolicy) Watch(opts api.ListOptions) (watch.Interface, error) { + return s.client.Get(). + Prefix("watch"). + Resource("podsecuritypolicies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +func (s *podSecurityPolicy) Delete(name string) error { + return s.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Do(). + Error() +} + +func (s *podSecurityPolicy) Update(psp *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { + result = &extensions.PodSecurityPolicy{} + err = s.client.Put(). + Resource("podsecuritypolicies"). + Name(psp.Name). + Body(psp). + Do(). + Into(result) + + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go new file mode 100644 index 00000000..8b06ce27 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go @@ -0,0 +1,83 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/client/restclient" +) + +type PolicyInterface interface { + PodDisruptionBudgetNamespacer +} + +// PolicyClient is used to interact with Kubernetes batch features. +type PolicyClient struct { + *restclient.RESTClient +} + +func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudget(c, namespace) +} + +func NewPolicy(c *restclient.Config) (*PolicyClient, error) { + config := *c + if err := setPolicyDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyClient{client}, nil +} + +func NewPolicyOrDie(c *restclient.Config) *PolicyClient { + client, err := NewPolicy(c) + if err != nil { + panic(err) + } + return client +} + +func setPolicyDefaults(config *restclient.Config) error { + g, err := registered.Group(policy.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go new file mode 100644 index 00000000..76ec392c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/client/restclient" +) + +// Interface holds the methods for clients of Kubernetes to allow mock testing. 
+type RbacInterface interface { + RoleBindingsNamespacer + RolesNamespacer + ClusterRoleBindings + ClusterRoles +} + +type RbacClient struct { + *restclient.RESTClient +} + +func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +func (c *RbacClient) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacClient) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +// NewRbac creates a new RbacClient for the given config. +func NewRbac(c *restclient.Config) (*RbacClient, error) { + config := *c + if err := setRbacDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacClient{client}, nil +} + +// NewRbacOrDie creates a new RbacClient for the given config and +// panics if there is an error in the config. +func NewRbacOrDie(c *restclient.Config) *RbacClient { + client, err := NewRbac(c) + if err != nil { + panic(err) + } + return client +} + +func setRbacDefaults(config *restclient.Config) error { + // if rbac group is not registered, return an error + g, err := registered.Group(rbac.GroupName) + if err != nil { + return err + } + config.APIPath = defaultAPIPath + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) + config.NegotiatedSerializer = api.Codecs + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go new file mode 100644 index 00000000..be928408 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// ReplicaSetsNamespacer has methods to work with ReplicaSet resources in a namespace +type ReplicaSetsNamespacer interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + List(opts api.ListOptions) (*extensions.ReplicaSetList, error) + Get(name string) (*extensions.ReplicaSet, error) + Create(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) + Update(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) + UpdateStatus(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// replicaSets implements ReplicaSetsNamespacer interface +type replicaSets struct { + client *ExtensionsClient + ns string +} + +// newReplicaSets returns a ReplicaSetClient +func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { + return &replicaSets{c, namespace} +} + +// List takes a selector, and returns the list of ReplicaSets that match that selector. +func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { + result = &extensions.ReplicaSetList{} + err = c.client.Get().Namespace(c.ns).Resource("replicasets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular ReplicaSet. +func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Get().Namespace(c.ns).Resource("replicasets").Name(name).Do().Into(result) + return +} + +// Create creates a new ReplicaSet. +func (c *replicaSets) Create(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Post().Namespace(c.ns).Resource("replicasets").Body(rs).Do().Into(result) + return +} + +// Update updates an existing ReplicaSet. +func (c *replicaSets) Update(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).Body(rs).Do().Into(result) + return +} + +// UpdateStatus updates an existing ReplicaSet status +func (c *replicaSets) UpdateStatus(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { + result = &extensions.ReplicaSet{} + err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).SubResource("status").Body(rs).Do().Into(result) + return +} + +// Delete deletes an existing ReplicaSet. +func (c *replicaSets) Delete(name string, options *api.DeleteOptions) (err error) { + return c.client.Delete().Namespace(c.ns).Resource("replicasets").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested ReplicaSets. +func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go new file mode 100644 index 00000000..f237a76a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// ReplicationControllersNamespacer has methods to work with ReplicationController resources in a namespace +type ReplicationControllersNamespacer interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. +type ReplicationControllerInterface interface { + List(opts api.ListOptions) (*api.ReplicationControllerList, error) + Get(name string) (*api.ReplicationController, error) + Create(ctrl *api.ReplicationController) (*api.ReplicationController, error) + Update(ctrl *api.ReplicationController) (*api.ReplicationController, error) + UpdateStatus(ctrl *api.ReplicationController) (*api.ReplicationController, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// replicationControllers implements ReplicationControllersNamespacer interface +type replicationControllers struct { + r *Client + ns string +} + +// newReplicationControllers returns a PodsClient +func newReplicationControllers(c *Client, namespace string) *replicationControllers { + return &replicationControllers{c, namespace} +} + +// List takes a selector, and returns the list of replication controllers that match that selector. +func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { + result = &api.ReplicationControllerList{} + err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular replication controller. +func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Into(result) + return +} + +// Create creates a new replication controller. +func (c *replicationControllers) Create(controller *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.r.Post().Namespace(c.ns).Resource("replicationControllers").Body(controller).Do().Into(result) + return +} + +// Update updates an existing replication controller. 
+func (c *replicationControllers) Update(controller *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).Body(controller).Do().Into(result) + return +} + +// UpdateStatus updates an existing replication controller status +func (c *replicationControllers) UpdateStatus(controller *api.ReplicationController) (result *api.ReplicationController, err error) { + result = &api.ReplicationController{} + err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).SubResource("status").Body(controller).Do().Into(result) + return +} + +// Delete deletes an existing replication controller. +func (c *replicationControllers) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested controllers. +func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("replicationControllers"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go new file mode 100644 index 00000000..acfd8ddb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go @@ -0,0 +1,102 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +// ResourceQuotasNamespacer has methods to work with ResourceQuota resources in a namespace +type ResourceQuotasNamespacer interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. +type ResourceQuotaInterface interface { + List(opts api.ListOptions) (*api.ResourceQuotaList, error) + Get(name string) (*api.ResourceQuota, error) + Delete(name string) error + Create(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) + Update(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) + UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// resourceQuotas implements ResourceQuotasNamespacer interface +type resourceQuotas struct { + r *Client + ns string +} + +// newResourceQuotas returns a resourceQuotas +func newResourceQuotas(c *Client, namespace string) *resourceQuotas { + return &resourceQuotas{ + r: c, + ns: namespace, + } +} + +// List takes a selector, and returns the list of resourceQuotas that match that selector. 
+func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { + result = &api.ResourceQuotaList{} + err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the resourceQuota, and returns the corresponding ResourceQuota object, and an error if it occurs +func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the resourceQuota, and returns an error if one occurs +func (c *resourceQuotas) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Error() +} + +// Create takes the representation of a resourceQuota. Returns the server's representation of the resourceQuota, and an error, if it occurs. +func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.r.Post().Namespace(c.ns).Resource("resourceQuotas").Body(resourceQuota).Do().Into(result) + return +} + +// Update takes the representation of a resourceQuota to update spec. Returns the server's representation of the resourceQuota, and an error, if it occurs. +func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).Body(resourceQuota).Do().Into(result) + return +} + +// Status takes the representation of a resourceQuota to update status. Returns the server's representation of the resourceQuota, and an error, if it occurs. +func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { + result = &api.ResourceQuota{} + err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).SubResource("status").Body(resourceQuota).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resource +func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("resourceQuotas"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go new file mode 100644 index 00000000..a43815c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// RoleBindingsNamespacer has methods to work with RoleBinding resources in a namespace +type RoleBindingsNamespacer interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + List(opts api.ListOptions) (*rbac.RoleBindingList, error) + Get(name string) (*rbac.RoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + Create(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) + Update(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// roleBindings implements RoleBindingsNamespacer interface +type roleBindings struct { + client *RbacClient + ns string +} + +// newRoleBindings returns a roleBindings +func newRoleBindings(c *RbacClient, namespace string) *roleBindings { + return &roleBindings{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of roleBindings that match those selectors. +func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { + result = &rbac.RoleBindingList{} + err = c.client.Get().Namespace(c.ns).Resource("rolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the roleBinding, and returns the corresponding RoleBinding object, and an error if it occurs +func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Get().Namespace(c.ns).Resource("rolebindings").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("rolebindings").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if it occurs. +func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Post().Namespace(c.ns).Resource("rolebindings").Body(roleBinding).Do().Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if it occurs. +func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { + result = &rbac.RoleBinding{} + err = c.client.Put().Namespace(c.ns).Resource("rolebindings").Name(roleBinding.Name).Body(roleBinding).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go new file mode 100644 index 00000000..29aee1ba --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/watch" +) + +// RolesNamespacer has methods to work with Role resources in a namespace +type RolesNamespacer interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + List(opts api.ListOptions) (*rbac.RoleList, error) + Get(name string) (*rbac.Role, error) + Delete(name string, options *api.DeleteOptions) error + Create(role *rbac.Role) (*rbac.Role, error) + Update(role *rbac.Role) (*rbac.Role, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// roles implements RolesNamespacer interface +type roles struct { + client *RbacClient + ns string +} + +// newRoles returns a roles +func newRoles(c *RbacClient, namespace string) *roles { + return &roles{ + client: c, + ns: namespace, + } +} + +// List takes label and field selectors, and returns the list of roles that match those selectors. +func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { + result = &rbac.RoleList{} + err = c.client.Get().Namespace(c.ns).Resource("roles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get takes the name of the role, and returns the corresponding Role object, and an error if it occurs +func (c *roles) Get(name string) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Get().Namespace(c.ns).Resource("roles").Name(name).Do().Into(result) + return +} + +// Delete takes the name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete().Namespace(c.ns).Resource("roles").Name(name).Body(options).Do().Error() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if it occurs. +func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Post().Namespace(c.ns).Resource("roles").Body(role).Do().Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if it occurs. +func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { + result = &rbac.Role{} + err = c.client.Put().Namespace(c.ns).Resource("roles").Name(role.Name).Body(role).Do().Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. 
+func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go new file mode 100644 index 00000000..705f6048 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +type ScaleNamespacer interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale (sub)resources. +type ScaleInterface interface { + Get(string, string) (*extensions.Scale, error) + Update(string, *extensions.Scale) (*extensions.Scale, error) +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface +type scales struct { + client *ExtensionsClient + ns string +} + +// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers +func newScales(c *ExtensionsClient, namespace string) *scales { + return &scales{ + client: c, + ns: namespace, + } +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { + result = &extensions.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get().Namespace(c.ns).Resource(resource.Resource).Name(name).SubResource("scale").Do().Into(result) + return +} + +func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { + result = &extensions.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go new file mode 100644 index 00000000..d2b83fce --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/watch" +) + +// ScheduledJobsNamespacer has methods to work with ScheduledJob resources in a namespace +type ScheduledJobsNamespacer interface { + ScheduledJobs(namespace string) ScheduledJobInterface +} + +// ScheduledJobInterface exposes methods to work on ScheduledJob resources. +type ScheduledJobInterface interface { + List(opts api.ListOptions) (*batch.ScheduledJobList, error) + Get(name string) (*batch.ScheduledJob, error) + Create(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) + Update(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) + Delete(name string, options *api.DeleteOptions) error + Watch(opts api.ListOptions) (watch.Interface, error) + UpdateStatus(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) +} + +// scheduledJobs implements ScheduledJobsNamespacer interface +type scheduledJobs struct { + r *BatchClient + ns string +} + +// newScheduledJobs returns a scheduledJobs +func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { + return &scheduledJobs{c, namespace} +} + +// Ensure statically that scheduledJobs implements ScheduledJobInterface. +var _ ScheduledJobInterface = &scheduledJobs{} + +// List returns a list of scheduled jobs that match the label and field selectors. +func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { + result = &batch.ScheduledJobList{} + err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular scheduled job. +func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").Name(name).Do().Into(result) + return +} + +// Create creates a new scheduled job. +func (c *scheduledJobs) Create(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Post().Namespace(c.ns).Resource("scheduledJobs").Body(job).Do().Into(result) + return +} + +// Update updates an existing scheduled job. +func (c *scheduledJobs) Update(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).Body(job).Do().Into(result) + return +} + +// Delete deletes a scheduled job, returns error if one occurs. +func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) (err error) { + return c.r.Delete().Namespace(c.ns).Resource("scheduledJobs").Name(name).Body(options).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested scheduled jobs. +func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("scheduledJobs"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} + +// UpdateStatus takes the name of the scheduled job and the new status. Returns the server's representation of the scheduled job, and an error, if it occurs. +func (c *scheduledJobs) UpdateStatus(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { + result = &batch.ScheduledJob{} + err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go new file mode 100644 index 00000000..33d77ad2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type SecretsNamespacer interface { + Secrets(namespace string) SecretsInterface +} + +type SecretsInterface interface { + Create(secret *api.Secret) (*api.Secret, error) + Update(secret *api.Secret) (*api.Secret, error) + Delete(name string) error + List(opts api.ListOptions) (*api.SecretList, error) + Get(name string) (*api.Secret, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// events implements Secrets interface +type secrets struct { + client *Client + namespace string +} + +// newSecrets returns a new secrets object. +func newSecrets(c *Client, ns string) *secrets { + return &secrets{ + client: c, + namespace: ns, + } +} + +func (s *secrets) Create(secret *api.Secret) (*api.Secret, error) { + result := &api.Secret{} + err := s.client.Post(). + Namespace(s.namespace). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + + return result, err +} + +// List returns a list of secrets matching the selectors. +func (s *secrets) List(opts api.ListOptions) (*api.SecretList, error) { + result := &api.SecretList{} + + err := s.client.Get(). + Namespace(s.namespace). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +// Get returns the given secret, or an error. +func (s *secrets) Get(name string) (*api.Secret, error) { + result := &api.Secret{} + err := s.client.Get(). + Namespace(s.namespace). + Resource("secrets"). + Name(name). + Do(). + Into(result) + + return result, err +} + +// Watch starts watching for secrets matching the given selectors. +func (s *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { + return s.client.Get(). + Prefix("watch"). + Namespace(s.namespace). + Resource("secrets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +func (s *secrets) Delete(name string) error { + return s.client.Delete(). + Namespace(s.namespace). + Resource("secrets"). + Name(name). + Do(). + Error() +} + +func (s *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { + result = &api.Secret{} + err = s.client.Put(). + Namespace(s.namespace). 
+ Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/securitycontextconstraints.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/securitycontextconstraints.go new file mode 100644 index 00000000..7ac5fe44 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/securitycontextconstraints.go @@ -0,0 +1,110 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type SecurityContextConstraintsInterface interface { + SecurityContextConstraints() SecurityContextConstraintInterface +} + +type SecurityContextConstraintInterface interface { + Get(name string) (result *api.SecurityContextConstraints, err error) + Create(scc *api.SecurityContextConstraints) (*api.SecurityContextConstraints, error) + List(opts api.ListOptions) (*api.SecurityContextConstraintsList, error) + Delete(name string) error + Update(*api.SecurityContextConstraints) (*api.SecurityContextConstraints, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// securityContextConstraints implements SecurityContextConstraintInterface +type securityContextConstraints struct { + client *Client +} + +// newSecurityContextConstraints returns a securityContextConstraints object. +func newSecurityContextConstraints(c *Client) *securityContextConstraints { + return &securityContextConstraints{c} +} + +func (s *securityContextConstraints) Create(scc *api.SecurityContextConstraints) (*api.SecurityContextConstraints, error) { + result := &api.SecurityContextConstraints{} + err := s.client.Post(). + Resource("securityContextConstraints"). + Body(scc). + Do(). + Into(result) + + return result, err +} + +// List returns a list of SecurityContextConstraints matching the selectors. +func (s *securityContextConstraints) List(opts api.ListOptions) (*api.SecurityContextConstraintsList, error) { + result := &api.SecurityContextConstraintsList{} + + err := s.client.Get(). + Resource("securityContextConstraints"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +// Get returns the given SecurityContextConstraints, or an error. +func (s *securityContextConstraints) Get(name string) (*api.SecurityContextConstraints, error) { + result := &api.SecurityContextConstraints{} + err := s.client.Get(). + Resource("securityContextConstraints"). + Name(name). + Do(). + Into(result) + + return result, err +} + +// Watch starts watching for SecurityContextConstraints matching the given selectors. +func (s *securityContextConstraints) Watch(opts api.ListOptions) (watch.Interface, error) { + return s.client.Get(). + Prefix("watch"). + Resource("securityContextConstraints"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +func (s *securityContextConstraints) Delete(name string) error { + return s.client.Delete(). 
+ Resource("securityContextConstraints"). + Name(name). + Do(). + Error() +} + +func (s *securityContextConstraints) Update(scc *api.SecurityContextConstraints) (result *api.SecurityContextConstraints, err error) { + result = &api.SecurityContextConstraints{} + err = s.client.Put(). + Resource("securityContextConstraints"). + Name(scc.Name). + Body(scc). + Do(). + Into(result) + + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go new file mode 100644 index 00000000..d78a25c4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go @@ -0,0 +1,120 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/watch" +) + +type ServiceAccountsNamespacer interface { + ServiceAccounts(namespace string) ServiceAccountsInterface +} + +type ServiceAccountsInterface interface { + Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) + Update(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) + Delete(name string) error + List(opts api.ListOptions) (*api.ServiceAccountList, error) + Get(name string) (*api.ServiceAccount, error) + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// serviceAccounts implements ServiceAccounts interface +type serviceAccounts struct { + client *Client + namespace string +} + +// newServiceAccounts returns a new serviceAccounts object. +func newServiceAccounts(c *Client, ns string) ServiceAccountsInterface { + return &serviceAccounts{ + client: c, + namespace: ns, + } +} + +func (s *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) { + result := &api.ServiceAccount{} + err := s.client.Post(). + Namespace(s.namespace). + Resource("serviceAccounts"). + Body(serviceAccount). + Do(). + Into(result) + + return result, err +} + +// List returns a list of serviceAccounts matching the selectors. +func (s *serviceAccounts) List(opts api.ListOptions) (*api.ServiceAccountList, error) { + result := &api.ServiceAccountList{} + + err := s.client.Get(). + Namespace(s.namespace). + Resource("serviceAccounts"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + + return result, err +} + +// Get returns the given serviceAccount, or an error. +func (s *serviceAccounts) Get(name string) (*api.ServiceAccount, error) { + result := &api.ServiceAccount{} + err := s.client.Get(). + Namespace(s.namespace). + Resource("serviceAccounts"). + Name(name). + Do(). + Into(result) + + return result, err +} + +// Watch starts watching for serviceAccounts matching the given selectors. +func (s *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { + return s.client.Get(). + Prefix("watch"). + Namespace(s.namespace). + Resource("serviceAccounts"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} + +func (s *serviceAccounts) Delete(name string) error { + return s.client.Delete(). + Namespace(s.namespace). + Resource("serviceAccounts"). + Name(name). + Do(). + Error() +} + +func (s *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { + result = &api.ServiceAccount{} + err = s.client.Put(). + Namespace(s.namespace). + Resource("serviceAccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go new file mode 100644 index 00000000..8b40a5d0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go @@ -0,0 +1,121 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util/net" + "k8s.io/kubernetes/pkg/watch" +) + +// ServicesNamespacer has methods to work with Service resources in a namespace +type ServicesNamespacer interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + List(opts api.ListOptions) (*api.ServiceList, error) + Get(name string) (*api.Service, error) + Create(srv *api.Service) (*api.Service, error) + Update(srv *api.Service) (*api.Service, error) + UpdateStatus(srv *api.Service) (*api.Service, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// services implements ServicesNamespacer interface +type services struct { + r *Client + ns string +} + +// newServices returns a services +func newServices(c *Client, namespace string) *services { + return &services{c, namespace} +} + +// List takes a selector, and returns the list of services that match that selector +func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { + result = &api.ServiceList{} + err = c.r.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Get returns information about a particular service. +func (c *services) Get(name string) (result *api.Service, err error) { + result = &api.Service{} + err = c.r.Get().Namespace(c.ns).Resource("services").Name(name).Do().Into(result) + return +} + +// Create creates a new service. +func (c *services) Create(svc *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.r.Post().Namespace(c.ns).Resource("services").Body(svc).Do().Into(result) + return +} + +// Update updates an existing service. 
+func (c *services) Update(svc *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.r.Put().Namespace(c.ns).Resource("services").Name(svc.Name).Body(svc).Do().Into(result) + return +} + +// UpdateStatus takes a Service object with the new status and applies it as an update to the existing Service. +func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { + result = &api.Service{} + err = c.r.Put().Namespace(c.ns).Resource("services").Name(service.Name).SubResource("status").Body(service).Do().Into(result) + return +} + +// Delete deletes an existing service. +func (c *services) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("services").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.r.Get(). + Namespace(c.ns). + Resource("services"). + SubResource("proxy"). + Name(net.JoinSchemeNamePort(scheme, name, port)). + Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go new file mode 100644 index 00000000..0908db06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// ThirdPartyResourceNamespacer has methods to work with ThirdPartyResource resources in a namespace +type ThirdPartyResourceNamespacer interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +type ThirdPartyResourceInterface interface { + List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) + Get(name string) (*extensions.ThirdPartyResource, error) + Create(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) + Update(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) + UpdateStatus(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) + Delete(name string) error + Watch(opts api.ListOptions) (watch.Interface, error) +} + +// thirdPartyResources implements DaemonsSetsNamespacer interface +type thirdPartyResources struct { + r *ExtensionsClient +} + +func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { + return &thirdPartyResources{c} +} + +// Ensure statically that thirdPartyResources implements ThirdPartyResourcesInterface. +var _ ThirdPartyResourceInterface = &thirdPartyResources{} + +func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { + result = &extensions.ThirdPartyResourceList{} + err = c.r.Get().Resource("thirdpartyresources").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) + return +} + +// Get returns information about a particular third party resource. +func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.r.Get().Resource("thirdpartyresources").Name(name).Do().Into(result) + return +} + +// Create creates a new third party resource. +func (c *thirdPartyResources) Create(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.r.Post().Resource("thirdpartyresources").Body(resource).Do().Into(result) + return +} + +// Update updates an existing third party resource. +func (c *thirdPartyResources) Update(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).Body(resource).Do().Into(result) + return +} + +// UpdateStatus updates an existing third party resource status +func (c *thirdPartyResources) UpdateStatus(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { + result = &extensions.ThirdPartyResource{} + err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).SubResource("status").Body(resource).Do().Into(result) + return +} + +// Delete deletes an existing third party resource. +func (c *thirdPartyResources) Delete(name string) error { + return c.r.Delete().Resource("thirdpartyresources").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested third party resources. +func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Resource("thirdpartyresources"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go new file mode 100644 index 00000000..37ada3c3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go @@ -0,0 +1,79 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting +// write. Callers should preserve previous executions if they wish to retry changes. It performs an +// exponential backoff. +// +// var pod *api.Pod +// err := RetryOnConflict(DefaultBackoff, func() (err error) { +// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) +// return +// }) +// if err != nil { +// // may be conflict if max retries were hit +// return err +// } +// ... +// +// TODO: Make Backoff an interface? +func RetryOnConflict(backoff wait.Backoff, fn func() error) error { + var lastConflictErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case errors.IsConflict(err): + lastConflictErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastConflictErr + } + return err +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/OWNERS new file mode 100644 index 00000000..ac830160 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/OWNERS @@ -0,0 +1,4 @@ +assignees: + - bprashanth + - derekwaynecarr + - mikedanese diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go new file mode 100644 index 00000000..a096429a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -0,0 +1,698 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/cache" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/client/record" + "k8s.io/kubernetes/pkg/controller/framework" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/integer" + "k8s.io/kubernetes/pkg/util/sets" +) + +const ( + CreatedByAnnotation = "kubernetes.io/created-by" + + // If a watch drops a delete event for a pod, it'll take this long + // before a dormant controller waiting for those packets is woken up anyway. It is + // specifically targeted at the case where some problem prevents an update + // of expectations, without it the controller could stay asleep forever. This should + // be set based on the expected latency of watch events. + // + // Currently a controller can service (create *and* observe the watch events for said + // creation) about 10 pods a second, so it takes about 1 min to service + // 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s + // latency/pod at the scale of 3000 pods over 100 nodes. + ExpectationsTimeout = 5 * time.Minute +) + +var ( + KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc +) + +type ResyncPeriodFunc func() time.Duration + +// Returns 0 for resyncPeriod in case resyncing is not needed. +func NoResyncPeriodFunc() time.Duration { + return 0 +} + +// StaticResyncPeriodFunc returns the resync period specified +func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc { + return func() time.Duration { + return resyncPeriod + } +} + +// Expectations are a way for controllers to tell the controller manager what they expect. eg: +// ControllerExpectations: { +// controller1: expects 2 adds in 2 minutes +// controller2: expects 2 dels in 2 minutes +// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met +// } +// +// Implementation: +// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion +// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller +// +// * Once set expectations can only be lowered +// * A controller isn't synced till its expectations are either fulfilled, or expire +// * Controllers that don't set expectations will get woken up for every matching controllee + +// ExpKeyFunc to parse out the key from a ControlleeExpectation +var ExpKeyFunc = func(obj interface{}) (string, error) { + if e, ok := obj.(*ControlleeExpectations); ok { + return e.key, nil + } + return "", fmt.Errorf("Could not find key for obj %#v", obj) +} + +// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations. +// Only abstracted out for testing. 
+// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different +// types of controllers, because the keys might conflict across types. +type ControllerExpectationsInterface interface { + GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) + SatisfiedExpectations(controllerKey string) bool + DeleteExpectations(controllerKey string) + SetExpectations(controllerKey string, add, del int) error + ExpectCreations(controllerKey string, adds int) error + ExpectDeletions(controllerKey string, dels int) error + CreationObserved(controllerKey string) + DeletionObserved(controllerKey string) + RaiseExpectations(controllerKey string, add, del int) + LowerExpectations(controllerKey string, add, del int) +} + +// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync. +type ControllerExpectations struct { + cache.Store +} + +// GetExpectations returns the ControlleeExpectations of the given controller. +func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) { + if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { + return exp.(*ControlleeExpectations), true, nil + } else { + return nil, false, err + } +} + +// DeleteExpectations deletes the expectations of the given controller from the TTLStore. +func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { + if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { + if err := r.Delete(exp); err != nil { + glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) + } + } +} + +// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed. +// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller +// manager. +func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { + if exp, exists, err := r.GetExpectations(controllerKey); exists { + if exp.Fulfilled() { + return true + } else if exp.isExpired() { + glog.V(4).Infof("Controller expectations expired %#v", exp) + return true + } else { + glog.V(4).Infof("Controller still waiting on expectations %#v", exp) + return false + } + } else if err != nil { + glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) + } else { + // When a new controller is created, it doesn't have expectations. + // When it doesn't see expected watch events for > TTL, the expectations expire. + // - In this case it wakes up, creates/deletes controllees, and sets expectations again. + // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. + // - In this case it continues without setting expectations till it needs to create/delete controllees. + glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) + } + // Trigger a sync if we either encountered and error (which shouldn't happen since we're + // getting from local store) or this controller hasn't established expectations. + return true +} + +// TODO: Extend ExpirationCache to support explicit expiration. +// TODO: Make this possible to disable in tests. +// TODO: Support injection of clock. 
+func (exp *ControlleeExpectations) isExpired() bool { + return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout +} + +// SetExpectations registers new expectations for the given controller. Forgets existing expectations. +func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { + exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: util.RealClock{}.Now()} + glog.V(4).Infof("Setting expectations %#v", exp) + return r.Add(exp) +} + +func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error { + return r.SetExpectations(controllerKey, adds, 0) +} + +func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error { + return r.SetExpectations(controllerKey, 0, dels) +} + +// Decrements the expectation counts of the given controller. +func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) { + if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { + exp.Add(int64(-add), int64(-del)) + // The expectations might've been modified since the update on the previous line. + glog.V(4).Infof("Lowered expectations %+v", exp) + } +} + +// Increments the expectation counts of the given controller. +func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) { + if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { + exp.Add(int64(add), int64(del)) + // The expectations might've been modified since the update on the previous line. + glog.V(4).Infof("Raised expectations %+v", exp) + } +} + +// CreationObserved atomically decrements the `add` expecation count of the given controller. +func (r *ControllerExpectations) CreationObserved(controllerKey string) { + r.LowerExpectations(controllerKey, 1, 0) +} + +// DeletionObserved atomically decrements the `del` expectation count of the given controller. +func (r *ControllerExpectations) DeletionObserved(controllerKey string) { + r.LowerExpectations(controllerKey, 0, 1) +} + +// Expectations are either fulfilled, or expire naturally. +type Expectations interface { + Fulfilled() bool +} + +// ControlleeExpectations track controllee creates/deletes. +type ControlleeExpectations struct { + add int64 + del int64 + key string + timestamp time.Time +} + +// Add increments the add and del counters. +func (e *ControlleeExpectations) Add(add, del int64) { + atomic.AddInt64(&e.add, add) + atomic.AddInt64(&e.del, del) +} + +// Fulfilled returns true if this expectation has been fulfilled. +func (e *ControlleeExpectations) Fulfilled() bool { + // TODO: think about why this line being atomic doesn't matter + return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0 +} + +// GetExpectations returns the add and del expectations of the controllee. +func (e *ControlleeExpectations) GetExpectations() (int64, int64) { + return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del) +} + +// NewControllerExpectations returns a store for ControllerExpectations. +func NewControllerExpectations() *ControllerExpectations { + return &ControllerExpectations{cache.NewStore(ExpKeyFunc)} +} + +// UIDSetKeyFunc to parse out the key from a UIDSet. +var UIDSetKeyFunc = func(obj interface{}) (string, error) { + if u, ok := obj.(*UIDSet); ok { + return u.key, nil + } + return "", fmt.Errorf("Could not find key for obj %#v", obj) +} + +// UIDSet holds a key and a set of UIDs. 
Used by the +// UIDTrackingControllerExpectations to remember which UID it has seen/still +// waiting for. +type UIDSet struct { + sets.String + key string +} + +// UIDTrackingControllerExpectations tracks the UID of the pods it deletes. +// This cache is needed over plain old expectations to safely handle graceful +// deletion. The desired behavior is to treat an update that sets the +// DeletionTimestamp on an object as a delete. To do so consistenly, one needs +// to remember the expected deletes so they aren't double counted. +// TODO: Track creates as well (#22599) +type UIDTrackingControllerExpectations struct { + ControllerExpectationsInterface + // TODO: There is a much nicer way to do this that involves a single store, + // a lock per entry, and a ControlleeExpectationsInterface type. + uidStoreLock sync.Mutex + // Store used for the UIDs associated with any expectation tracked via the + // ControllerExpectationsInterface. + uidStore cache.Store +} + +// GetUIDs is a convenience method to avoid exposing the set of expected uids. +// The returned set is not thread safe, all modifications must be made holding +// the uidStoreLock. +func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String { + if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists { + return uid.(*UIDSet).String + } + return nil +} + +// ExpectDeletions records expectations for the given deleteKeys, against the given controller. +func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error { + u.uidStoreLock.Lock() + defer u.uidStoreLock.Unlock() + + if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { + glog.Errorf("Clobbering existing delete keys: %+v", existing) + } + expectedUIDs := sets.NewString() + for _, k := range deletedKeys { + expectedUIDs.Insert(k) + } + glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) + if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { + return err + } + return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len()) +} + +// DeletionObserved records the given deleteKey as a deletion, for the given rc. +func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) { + u.uidStoreLock.Lock() + defer u.uidStoreLock.Unlock() + + uids := u.GetUIDs(rcKey) + if uids != nil && uids.Has(deleteKey) { + glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) + u.ControllerExpectationsInterface.DeletionObserved(rcKey) + uids.Delete(deleteKey) + } +} + +// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the +// underlying ControllerExpectationsInterface. +func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { + u.uidStoreLock.Lock() + defer u.uidStoreLock.Unlock() + + u.ControllerExpectationsInterface.DeleteExpectations(rcKey) + if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { + if err := u.uidStore.Delete(uidExp); err != nil { + glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) + } + } +} + +// NewUIDTrackingControllerExpectations returns a wrapper around +// ControllerExpectations that is aware of deleteKeys. 
+func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations { + return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)} +} + +// PodControlInterface is an interface that knows how to add or delete pods +// created as an interface to allow testing. +type PodControlInterface interface { + // CreatePods creates new pods according to the spec. + CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error + // CreatePodsOnNode creates a new pod accorting to the spec on the specified node. + CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error + // DeletePod deletes the pod identified by podID. + DeletePod(namespace string, podID string, object runtime.Object) error +} + +// RealPodControl is the default implementation of PodControlInterface. +type RealPodControl struct { + KubeClient clientset.Interface + Recorder record.EventRecorder +} + +var _ PodControlInterface = &RealPodControl{} + +func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set { + desiredLabels := make(labels.Set) + for k, v := range template.Labels { + desiredLabels[k] = v + } + return desiredLabels +} + +func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) { + desiredAnnotations := make(labels.Set) + for k, v := range template.Annotations { + desiredAnnotations[k] = v + } + createdByRef, err := api.GetReference(object) + if err != nil { + return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) + } + + // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients + // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. + // We need to consistently handle this case of annotation versioning. 
+ codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) + + createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{ + Reference: *createdByRef, + }) + if err != nil { + return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err) + } + desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson) + return desiredAnnotations, nil +} + +func getPodsPrefix(controllerName string) string { + // use the dash (if the name isn't too long) to make the pod name a bit prettier + prefix := fmt.Sprintf("%s-", controllerName) + if len(validation.ValidatePodName(prefix, true)) != 0 { + prefix = controllerName + } + return prefix +} + +func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error { + return r.createPods("", namespace, template, object) +} + +func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { + return r.createPods(nodeName, namespace, template, object) +} + +func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object) (*api.Pod, error) { + desiredLabels := getPodsLabelSet(template) + desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) + if err != nil { + return nil, err + } + accessor, err := meta.Accessor(parentObject) + if err != nil { + return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) + } + prefix := getPodsPrefix(accessor.GetName()) + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: desiredLabels, + Annotations: desiredAnnotations, + GenerateName: prefix, + }, + } + if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil { + return nil, fmt.Errorf("unable to convert pod template: %v", err) + } + return pod, nil +} + +func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { + pod, err := GetPodFromTemplate(template, object) + if err != nil { + return err + } + if len(nodeName) != 0 { + pod.Spec.NodeName = nodeName + } + if labels.Set(pod.Labels).AsSelector().Empty() { + return fmt.Errorf("unable to create pods, no labels") + } + if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { + r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err) + return fmt.Errorf("unable to create pods: %v", err) + } else { + accessor, err := meta.Accessor(object) + if err != nil { + glog.Errorf("parentObject does not have ObjectMeta, %v", err) + return nil + } + glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) + r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name) + } + return nil +} + +func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error { + accessor, err := meta.Accessor(object) + if err != nil { + return fmt.Errorf("object does not have ObjectMeta, %v", err) + } + if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { + r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err) + return fmt.Errorf("unable to delete pods: %v", err) + } else { + glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID) + r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID) + } + return nil +} + +type FakePodControl struct { + sync.Mutex + Templates 
[]api.PodTemplateSpec + DeletePodName []string + Err error +} + +var _ PodControlInterface = &FakePodControl{} + +func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error { + f.Lock() + defer f.Unlock() + if f.Err != nil { + return f.Err + } + f.Templates = append(f.Templates, *spec) + return nil +} + +func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { + f.Lock() + defer f.Unlock() + if f.Err != nil { + return f.Err + } + f.Templates = append(f.Templates, *template) + return nil +} + +func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error { + f.Lock() + defer f.Unlock() + if f.Err != nil { + return f.Err + } + f.DeletePodName = append(f.DeletePodName, podID) + return nil +} + +func (f *FakePodControl) Clear() { + f.Lock() + defer f.Unlock() + f.DeletePodName = []string{} + f.Templates = []api.PodTemplateSpec{} +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*api.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. PodRunning < PodUnknown < PodPending + m := map[api.PodPhase]int{api.PodRunning: 0, api.PodUnknown: 1, api.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { + return api.IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for more time < less time < empty time + if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) { + return afterOrZero(s[j].CreationTimestamp, s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*api.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. 
Not ready < ready + // If only one of the pods is not ready, the not ready one is smaller + if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { + return !api.IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for empty time < less time < more time + // If both pods are ready, the latest ready one is smaller + if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. Empty creation time pods < newer pods < older pods + if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) { + return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp) + } + return false +} + +// afterOrZero checks if time t1 is after time t2; if one of them +// is zero, the zero time is seen as after non-zero time. +func afterOrZero(t1, t2 unversioned.Time) bool { + if t1.Time.IsZero() || t2.Time.IsZero() { + return t1.Time.IsZero() + } + return t1.After(t2.Time) +} + +func podReadyTime(pod *api.Pod) unversioned.Time { + if api.IsPodReady(pod) { + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == api.PodReady && c.Status == api.ConditionTrue { + return c.LastTransitionTime + } + } + } + return unversioned.Time{} +} + +func maxContainerRestarts(pod *api.Pod) int { + maxRestarts := 0 + for _, c := range pod.Status.ContainerStatuses { + maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + } + return maxRestarts +} + +// FilterActivePods returns pods that have not terminated. +func FilterActivePods(pods []api.Pod) []*api.Pod { + var result []*api.Pod + for i := range pods { + p := pods[i] + if IsPodActive(p) { + result = append(result, &p) + } else { + glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", + p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) + } + } + return result +} + +func IsPodActive(p api.Pod) bool { + return api.PodSucceeded != p.Status.Phase && + api.PodFailed != p.Status.Phase && + p.DeletionTimestamp == nil +} + +// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods. +func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet { + active := []*extensions.ReplicaSet{} + for i := range replicaSets { + if replicaSets[i].Spec.Replicas > 0 { + active = append(active, replicaSets[i]) + } + } + return active +} + +// PodKey returns a key unique to the given pod within a cluster. +// It's used so we consistently use the same key scheme in this module. +// It does exactly what cache.MetaNamespaceKeyFunc would have done +// expcept there's not possibility for error since we know the exact type. +func PodKey(pod *api.Pod) string { + return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name) +} + +// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker. 
+type ControllersByCreationTimestamp []*api.ReplicationController + +func (o ControllersByCreationTimestamp) Len() int { return len(o) } +func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o ControllersByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) +} + +// ReplicaSetsByCreationTimestamp sorts a list of ReplicationSets by creation timestamp, using their names as a tie breaker. +type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet + +func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } +func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } + +func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/doc.go new file mode 100644 index 00000000..1e310b46 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controller contains code for controllers (like the replication +// controller). +package controller diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go new file mode 100644 index 00000000..c6363952 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go @@ -0,0 +1,326 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "sync" + "time" + + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" +) + +// Config contains all the settings for a Controller. +type Config struct { + // The queue for your objects; either a cache.FIFO or + // a cache.DeltaFIFO. Your Process() function should accept + // the output of this Oueue's Pop() method. + cache.Queue + + // Something that can list and watch your objects. + cache.ListerWatcher + + // Something that can process your objects. + Process ProcessFunc + + // The type of your objects. 
+ ObjectType runtime.Object + + // Reprocess everything at least this often. + // Note that if it takes longer for you to clear the queue than this + // period, you will end up processing items in the order determined + // by cache.FIFO.Replace(). Currently, this is random. If this is a + // problem, we can change that replacement policy to append new + // things to the end of the queue instead of replacing the entire + // queue. + FullResyncPeriod time.Duration + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. + RetryOnError bool +} + +// ProcessFunc processes a single object. +type ProcessFunc func(obj interface{}) error + +// Controller is a generic controller framework. +type Controller struct { + config Config + reflector *cache.Reflector + reflectorMutex sync.RWMutex +} + +// TODO make the "Controller" private, and convert all references to use ControllerInterface instead +type ControllerInterface interface { + Run(stopCh <-chan struct{}) + HasSynced() bool +} + +// New makes a new Controller from the given Config. +func New(c *Config) *Controller { + ctlr := &Controller{ + config: *c, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh. +// It's an error to call Run more than once. +// Run blocks; call via go. +func (c *Controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + r := cache.NewReflector( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + c.config.FullResyncPeriod, + ) + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + r.RunUntil(stopCh) + + wait.Until(c.processLoop, time.Second, stopCh) +} + +// Returns true once this controller has completed an initial resource listing +func (c *Controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +// Requeue adds the provided object back into the queue if it does not already exist. +func (c *Controller) Requeue(obj interface{}) error { + return c.config.Queue.AddIfNotPresent(cache.Deltas{ + cache.Delta{ + Type: cache.Sync, + Object: obj, + }, + }) +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +func (c *Controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process)) + if err != nil { + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that happen to a +// resource. The events are informational only, so you can't return an +// error. +// * OnAdd is called when an object is added. +// * OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// * OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type cache.DeletedFinalStateUnknown. 
This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. +type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { + if r.AddFunc != nil { + r.AddFunc(obj) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// DeletionHandlingMetaNamespaceKeyFunc checks for +// cache.DeletedFinalStateUnknown objects before calling +// cache.MetaNamespaceKeyFunc. +func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { + if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return cache.MetaNamespaceKeyFunc(obj) +} + +// NewInformer returns a cache.Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// +func NewInformer( + lw cache.ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, +) (cache.Store, *Controller) { + // This will hold the client state, as we know it. + clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. 
+ fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(cache.Deltas) { + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case cache.Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} + +// NewIndexerInformer returns a cache.Indexer and a controller for populating the index +// while also providing event notifications. You should only used the returned +// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// +func NewIndexerInformer( + lw cache.ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + indexers cache.Indexers, +) (cache.Indexer, *Controller) { + // This will hold the client state, as we know it. + clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(cache.Deltas) { + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case cache.Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go new file mode 100644 index 00000000..ecd3cf28 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
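For orientation, here is a minimal sketch of how the NewInformer API added above is typically wired together. It is illustrative only, not part of the vendored file: it uses the FakeControllerSource defined later in this package as the ListerWatcher, and the handler bodies and object names are placeholders.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func main() {
	// The fake source implements cache.ListerWatcher, so it can stand in for a
	// real client-backed ListWatch here.
	source := framework.NewFakeControllerSource()

	store, ctrl := framework.NewInformer(
		source,         // ListerWatcher for the resource
		&api.Pod{},     // expected object type
		30*time.Second, // full resync period; 0 delays re-lists as long as possible
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { fmt.Println("added") },
			UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated") },
			DeleteFunc: func(obj interface{}) { fmt.Println("deleted") },
		},
	)

	stopCh := make(chan struct{})
	defer close(stopCh)
	go ctrl.Run(stopCh)

	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "example"}})

	// The returned store is for Get/List only; writing to it directly would make
	// the event notifications inconsistent with the cache contents.
	_ = store
}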
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package framework implements all the grunt work involved in running a simple controller. +package framework diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go new file mode 100644 index 00000000..9e90e7c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "errors" + "math/rand" + "strconv" + "sync" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/watch" +) + +func NewFakeControllerSource() *FakeControllerSource { + return &FakeControllerSource{ + Items: map[nnu]runtime.Object{}, + Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), + } +} + +func NewFakePVControllerSource() *FakePVControllerSource { + return &FakePVControllerSource{ + FakeControllerSource{ + Items: map[nnu]runtime.Object{}, + Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), + }} +} + +func NewFakePVCControllerSource() *FakePVCControllerSource { + return &FakePVCControllerSource{ + FakeControllerSource{ + Items: map[nnu]runtime.Object{}, + Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), + }} +} + +// FakeControllerSource implements listing/watching for testing. +type FakeControllerSource struct { + lock sync.RWMutex + Items map[nnu]runtime.Object + changes []watch.Event // one change per resourceVersion + Broadcaster *watch.Broadcaster +} + +type FakePVControllerSource struct { + FakeControllerSource +} + +type FakePVCControllerSource struct { + FakeControllerSource +} + +// namespace, name, uid to be used as a key. +type nnu struct { + namespace, name string + uid types.UID +} + +// Add adds an object to the set and sends an add event to watchers. +// obj's ResourceVersion is set. +func (f *FakeControllerSource) Add(obj runtime.Object) { + f.Change(watch.Event{Type: watch.Added, Object: obj}, 1) +} + +// Modify updates an object in the set and sends a modified event to watchers. +// obj's ResourceVersion is set. +func (f *FakeControllerSource) Modify(obj runtime.Object) { + f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1) +} + +// Delete deletes an object from the set and sends a delete event to watchers. +// obj's ResourceVersion is set. 
+func (f *FakeControllerSource) Delete(lastValue runtime.Object) { + f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1) +} + +// AddDropWatch adds an object to the set but forgets to send an add event to +// watchers. +// obj's ResourceVersion is set. +func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) { + f.Change(watch.Event{Type: watch.Added, Object: obj}, 0) +} + +// ModifyDropWatch updates an object in the set but forgets to send a modify +// event to watchers. +// obj's ResourceVersion is set. +func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) { + f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0) +} + +// DeleteDropWatch deletes an object from the set but forgets to send a delete +// event to watchers. +// obj's ResourceVersion is set. +func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) { + f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0) +} + +func (f *FakeControllerSource) key(accessor meta.Object) nnu { + return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()} +} + +// Change records the given event (setting the object's resource version) and +// sends a watch event with the specified probability. +func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) { + f.lock.Lock() + defer f.lock.Unlock() + + accessor, err := meta.Accessor(e.Object) + if err != nil { + panic(err) // this is test code only + } + + resourceVersion := len(f.changes) + 1 + accessor.SetResourceVersion(strconv.Itoa(resourceVersion)) + f.changes = append(f.changes, e) + key := f.key(accessor) + switch e.Type { + case watch.Added, watch.Modified: + f.Items[key] = e.Object + case watch.Deleted: + delete(f.Items, key) + } + + if rand.Float64() < watchProbability { + f.Broadcaster.Action(e.Type, e.Object) + } +} + +func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) { + list := make([]runtime.Object, 0, len(f.Items)) + for _, obj := range f.Items { + // Must make a copy to allow clients to modify the object. + // Otherwise, if they make a change and write it back, they + // will inadvertently change our canonical copy (in + // addition to racing with other clients). + objCopy, err := api.Scheme.DeepCopy(obj) + if err != nil { + return nil, err + } + list = append(list, objCopy.(runtime.Object)) + } + return list, nil +} + +// List returns a list object, with its resource version set. +func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) { + f.lock.RLock() + defer f.lock.RUnlock() + list, err := f.getListItemsLocked() + if err != nil { + return nil, err + } + listObj := &api.List{} + if err := meta.SetList(listObj, list); err != nil { + return nil, err + } + objMeta, err := api.ListMetaFor(listObj) + if err != nil { + return nil, err + } + resourceVersion := len(f.changes) + objMeta.ResourceVersion = strconv.Itoa(resourceVersion) + return listObj, nil +} + +// List returns a list object, with its resource version set. 
+func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) { + f.lock.RLock() + defer f.lock.RUnlock() + list, err := f.FakeControllerSource.getListItemsLocked() + if err != nil { + return nil, err + } + listObj := &api.PersistentVolumeList{} + if err := meta.SetList(listObj, list); err != nil { + return nil, err + } + objMeta, err := api.ListMetaFor(listObj) + if err != nil { + return nil, err + } + resourceVersion := len(f.changes) + objMeta.ResourceVersion = strconv.Itoa(resourceVersion) + return listObj, nil +} + +// List returns a list object, with its resource version set. +func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) { + f.lock.RLock() + defer f.lock.RUnlock() + list, err := f.FakeControllerSource.getListItemsLocked() + if err != nil { + return nil, err + } + listObj := &api.PersistentVolumeClaimList{} + if err := meta.SetList(listObj, list); err != nil { + return nil, err + } + objMeta, err := api.ListMetaFor(listObj) + if err != nil { + return nil, err + } + resourceVersion := len(f.changes) + objMeta.ResourceVersion = strconv.Itoa(resourceVersion) + return listObj, nil +} + +// Watch returns a watch, which will be pre-populated with all changes +// after resourceVersion. +func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) { + f.lock.RLock() + defer f.lock.RUnlock() + rc, err := strconv.Atoi(options.ResourceVersion) + if err != nil { + return nil, err + } + if rc < len(f.changes) { + changes := []watch.Event{} + for _, c := range f.changes[rc:] { + // Must make a copy to allow clients to modify the + // object. Otherwise, if they make a change and write + // it back, they will inadvertently change the our + // canonical copy (in addition to racing with other + // clients). + objCopy, err := api.Scheme.DeepCopy(c.Object) + if err != nil { + return nil, err + } + changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)}) + } + return f.Broadcaster.WatchWithPrefix(changes), nil + } else if rc > len(f.changes) { + return nil, errors.New("resource version in the future not supported by this fake") + } + return f.Broadcaster.Watch(), nil +} + +// Shutdown closes the underlying broadcaster, waiting for events to be +// delivered. It's an error to call any method after calling shutdown. This is +// enforced by Shutdown() leaving f locked. +func (f *FakeControllerSource) Shutdown() { + f.lock.Lock() // Purposely no unlock. + f.Broadcaster.Shutdown() +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/mutation_detector.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/mutation_detector.go new file mode 100644 index 00000000..277641ef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/mutation_detector.go @@ -0,0 +1,135 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
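The fake source above exists to drive controllers in unit tests. A possible test sketch, with hypothetical names, might look like the following; it feeds one Add through the fake source and waits for the informer to deliver the notification.

package framework_test

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func TestFakeSourceDrivesInformer(t *testing.T) {
	source := framework.NewFakeControllerSource()

	added := make(chan string, 1)
	_, ctrl := framework.NewInformer(
		source,
		&api.Pod{},
		0, // no periodic resync in this test
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				added <- obj.(*api.Pod).Name
			},
		},
	)

	stopCh := make(chan struct{})
	defer close(stopCh)
	go ctrl.Run(stopCh)

	// The fake source assigns resource versions and broadcasts the watch event.
	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "pod-a"}})

	select {
	case name := <-added:
		if name != "pod-a" {
			t.Fatalf("got add for %q, want pod-a", name)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for the add notification")
	}
}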
+*/ + +package framework + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + utildiff "k8s.io/kubernetes/pkg/util/diff" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +type CacheMutationDetector interface { + AddObject(obj interface{}) + Run(stopCh <-chan struct{}) + // conforms to + CompareObjects() +} + +func NewCacheMutationDetector(name string) CacheMutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} +func (dummyMutationDetector) CompareObjects() { +} + +// cacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that its getting mutated +type defaultCacheMutationDetector struct { + name string + period time.Duration + + lock sync.Mutex + cachedObjs []cacheObj + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + go func() { + for { + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } + }() +} + +// addObj makes a deep copy of the object for later comparison. It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(cache.DeletedFinalStateUnknown); ok { + return + } + if _, ok := obj.(runtime.Object); !ok { + return + } + + copiedObj, err := api.Scheme.Copy(obj.(runtime.Object)) + if err != nil { + return + } + + d.lock.Lock() + defer d.lock.Unlock() + d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.lock.Lock() + defer d.lock.Unlock() + + altered := false + for i, obj := range d.cachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, utildiff.ObjectDiff(obj.cached, obj.copied)) + altered = true + } + } + + if altered { + msg := fmt.Sprintf("cache %s modified", d.name) + if d.failureFunc != nil { + d.failureFunc(msg) + return + } + panic(msg) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go new file mode 100644 index 00000000..419566e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go @@ -0,0 +1,382 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
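The mutation detector above is disabled unless KUBE_CACHE_MUTATION_DETECTOR is set in the process environment, and the flag is read once in init(), so it must be exported before the program starts. A rough sketch of the mechanics, under that assumption and with placeholder names:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func main() {
	// With KUBE_CACHE_MUTATION_DETECTOR=true exported before startup this returns
	// the real detector; otherwise it returns a no-op implementation.
	detector := framework.NewCacheMutationDetector("example-cache")

	stopCh := make(chan struct{})
	defer close(stopCh)
	detector.Run(stopCh) // starts a background goroutine that compares copies every second

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "cached-pod"}}
	detector.AddObject(pod) // snapshots a deep copy as the object enters the cache

	pod.Labels = map[string]string{"oops": "mutated"} // illegal mutation of a cached object

	// On the next comparison pass the detector prints an object diff and panics
	// (or calls failureFunc, if one was configured).
	time.Sleep(2 * time.Second)
}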
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "fmt" + "reflect" + "sync" + "time" + + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" +) + +// if you use this, there is one behavior change compared to a standard Informer. +// When you receive a notification, the cache will be AT LEAST as fresh as the +// notification, but it MAY be more fresh. You should NOT depend on the contents +// of the cache exactly matching the notification you've received in handler +// functions. If there was a create, followed by a delete, the cache may NOT +// have your item. This has advantages over the broadcaster since it allows us +// to share a common cache across many controllers. Extending the broadcaster +// would have required us keep duplicate caches for each watch. +type SharedInformer interface { + // events to a single handler are delivered sequentially, but there is no coordination between different handlers + // You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned. + // TODO we should try to remove this restriction eventually. + AddEventHandler(handler ResourceEventHandler) error + GetStore() cache.Store + // GetController gives back a synthetic interface that "votes" to start the informer + GetController() ControllerInterface + Run(stopCh <-chan struct{}) + HasSynced() bool + LastSyncResourceVersion() string +} + +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers cache.Indexers) error + GetIndexer() cache.Indexer +} + +// NewSharedInformer creates a new instance for the listwatcher. +// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can +// be shared amongst all consumers. +func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the listwatcher. +// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can +// be shared amongst all consumers. 
+func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer { + sharedIndexInformer := &sharedIndexInformer{ + processor: &sharedProcessor{}, + indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), + listerWatcher: lw, + objectType: objType, + fullResyncPeriod: resyncPeriod, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%v", reflect.TypeOf(objType))), + } + return sharedIndexInformer +} + +type sharedIndexInformer struct { + indexer cache.Indexer + controller *Controller + + processor *sharedProcessor + cacheMutationDetector CacheMutationDetector + + // This block is tracked to handle late initialization of the controller + listerWatcher cache.ListerWatcher + objectType runtime.Object + fullResyncPeriod time.Duration + + started bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disonnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. +type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.fullResyncPeriod, + RetryOnError: false, + + Process: s.HandleDeltas, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.started = true + }() + + s.cacheMutationDetector.Run(stopCh) + s.processor.run(stopCh) + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) isStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.reflector.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() cache.Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() cache.Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() ControllerInterface { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) 
AddEventHandler(handler ResourceEventHandler) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if !s.started { + listener := newProcessListener(handler) + s.processor.listeners = append(s.processor.listeners, listener) + return nil + } + + // in order to safely join, we have to + // stop sending add/update/delete notification + // do a list against the store + // send synthetic "Add" events to the new handler + // unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + listener := newProcessListener(handler) + s.processor.listeners = append(s.processor.listeners, listener) + + items := s.indexer.List() + for i := range items { + listener.add(addNotification{newObj: items[i]}) + } + + return nil +} + +func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + // from oldest to newest + for _, d := range obj.(cache.Deltas) { + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + s.cacheMutationDetector.AddObject(d.Object) + + if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { + if err := s.indexer.Update(d.Object); err != nil { + return err + } + s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}) + } else { + if err := s.indexer.Add(d.Object); err != nil { + return err + } + s.processor.distribute(addNotification{newObj: d.Object}) + } + case cache.Deleted: + if err := s.indexer.Delete(d.Object); err != nil { + return err + } + s.processor.distribute(deleteNotification{oldObj: d.Object}) + } + } + return nil +} + +type sharedProcessor struct { + listeners []*processorListener +} + +func (p *sharedProcessor) distribute(obj interface{}) { + for _, listener := range p.listeners { + listener.add(obj) + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + for _, listener := range p.listeners { + go listener.run(stopCh) + go listener.pop(stopCh) + } +} + +type processorListener struct { + // lock/cond protects access to 'pendingNotifications'. + lock sync.RWMutex + cond sync.Cond + + // pendingNotifications is an unbounded slice that holds all notifications not yet distributed + // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. 
+ // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better + pendingNotifications []interface{} + + nextCh chan interface{} + + handler ResourceEventHandler +} + +func newProcessListener(handler ResourceEventHandler) *processorListener { + ret := &processorListener{ + pendingNotifications: []interface{}{}, + nextCh: make(chan interface{}), + handler: handler, + } + + ret.cond.L = &ret.lock + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener) pop(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + blockingGet := func() (interface{}, bool) { + p.lock.Lock() + defer p.lock.Unlock() + + for len(p.pendingNotifications) == 0 { + // check if we're shutdown + select { + case <-stopCh: + return nil, true + default: + } + p.cond.Wait() + } + + nt := p.pendingNotifications[0] + p.pendingNotifications = p.pendingNotifications[1:] + return nt, false + } + + notification, stopped := blockingGet() + if stopped { + return + } + + select { + case <-stopCh: + return + case p.nextCh <- notification: + } + } +} + +func (p *processorListener) run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + var next interface{} + select { + case <-stopCh: + func() { + p.lock.Lock() + defer p.lock.Unlock() + p.cond.Broadcast() + }() + return + case next = <-p.nextCh: + } + + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go new file mode 100644 index 00000000..0333eff3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "hash/adler32" + "sync" + + "github.com/golang/groupcache/lru" + "k8s.io/kubernetes/pkg/api/meta" + hashutil "k8s.io/kubernetes/pkg/util/hash" +) + +type objectWithMeta interface { + meta.Object +} + +// keyFunc returns the key of an object, which is used to look up in the cache for it's matching object. +// Since we match objects by namespace and Labels/Selector, so if two objects have the same namespace and labels, +// they will have the same key. 
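To illustrate the shared informer API defined above (this is a sketch with hypothetical handler bodies, again using the package's own FakeControllerSource): several controllers register handlers against one informer and therefore share a single watch and a single cache. Handlers added before Run are fed from the initial list and watch; a handler added after Run has started is caught up with synthetic add notifications for the items already in the store.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func main() {
	source := framework.NewFakeControllerSource()

	informer := framework.NewSharedIndexInformer(source, &api.Pod{}, 0, cache.Indexers{})

	// Two independent consumers of the same underlying list/watch and store.
	informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("controller A saw add") },
	})
	informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("controller B saw add") },
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go informer.Run(stopCh)

	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "shared"}})

	// GetStore()/GetIndexer() expose the one shared cache; each handler receives
	// its own sequential stream of notifications via a processorListener.
	_ = informer.GetStore()
}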
+func keyFunc(obj objectWithMeta) uint64 { + hash := adler32.New() + hashutil.DeepHashObject(hash, &equivalenceLabelObj{ + namespace: obj.GetNamespace(), + labels: obj.GetLabels(), + }) + return uint64(hash.Sum32()) +} + +type equivalenceLabelObj struct { + namespace string + labels map[string]string +} + +// MatchingCache save label and selector matching relationship +type MatchingCache struct { + mutex sync.RWMutex + cache *lru.Cache +} + +// NewMatchingCache return a NewMatchingCache, which save label and selector matching relationship. +func NewMatchingCache(maxCacheEntries int) *MatchingCache { + return &MatchingCache{ + cache: lru.New(maxCacheEntries), + } +} + +// Add will add matching information to the cache. +func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) { + key := keyFunc(labelObj) + c.mutex.Lock() + defer c.mutex.Unlock() + c.cache.Add(key, selectorObj) +} + +// GetMatchingObject lookup the matching object for a given object. +// Note: the cache information may be invalid since the controller may be deleted or updated, +// we need check in the external request to ensure the cache data is not dirty. +func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) { + key := keyFunc(labelObj) + // NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state( + // it need update the least recently usage information). So we can not call it concurrently. + c.mutex.Lock() + defer c.mutex.Unlock() + return c.cache.Get(key) +} + +// Update update the cached matching information. +func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) { + c.Add(labelObj, selectorObj) +} + +// InvalidateAll invalidate the whole cache. +func (c *MatchingCache) InvalidateAll() { + c.mutex.Lock() + defer c.mutex.Unlock() + c.cache = lru.New(c.cache.MaxEntries) +} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS new file mode 100644 index 00000000..766c481b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS @@ -0,0 +1,3 @@ +assignees: + - erictune + - liggitt diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go new file mode 100644 index 00000000..b80fa594 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go @@ -0,0 +1,256 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
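A small usage sketch for the MatchingCache defined above (objects and cache size are hypothetical): the cache keys on namespace plus labels, so any object with the same namespace and label set hits the same entry, and a hit must still be re-validated because the cached controller may have changed since it was stored.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	matchingCache := controller.NewMatchingCache(4096)

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{
		Namespace: "default",
		Labels:    map[string]string{"app": "web"},
	}}
	rc := &api.ReplicationController{ObjectMeta: api.ObjectMeta{
		Namespace: "default",
		Name:      "web-rc",
	}}

	// Remember that pods with this namespace+labels were matched by this controller.
	matchingCache.Add(pod, rc)

	// Lookups key on namespace+labels only, so any pod carrying the same labels hits.
	if obj, ok := matchingCache.GetMatchingObject(pod); ok {
		fmt.Println("cached match:", obj.(*api.ReplicationController).Name)
	}
	// The hit may be stale (the controller could have been deleted or relabeled),
	// so callers still validate the match against the live object before using it.
}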
+*/ + +package credentialprovider + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/golang/glog" +) + +// DockerConfigJson represents ~/.docker/config.json file info +// see https://github.com/docker/docker/pull/12009 +type DockerConfigJson struct { + Auths DockerConfig `json:"auths"` + HttpHeaders map[string]string `json:"HttpHeaders,omitempty"` +} + +// DockerConfig represents the config file used by the docker CLI. +// This config that represents the credentials that should be used +// when pulling images from specific image repositories. +type DockerConfig map[string]DockerConfigEntry + +type DockerConfigEntry struct { + Username string + Password string + Email string + Provider DockerConfigProvider +} + +var ( + preferredPathLock sync.Mutex + preferredPath = "" + workingDirPath = "" + homeDirPath = os.Getenv("HOME") + rootDirPath = "/" + homeJsonDirPath = filepath.Join(homeDirPath, ".docker") + rootJsonDirPath = filepath.Join(rootDirPath, ".docker") + + configFileName = ".dockercfg" + configJsonFileName = "config.json" +) + +func SetPreferredDockercfgPath(path string) { + preferredPathLock.Lock() + defer preferredPathLock.Unlock() + preferredPath = path +} + +func GetPreferredDockercfgPath() string { + preferredPathLock.Lock() + defer preferredPathLock.Unlock() + return preferredPath +} + +func ReadDockerConfigFile() (cfg DockerConfig, err error) { + // Try happy path first - latest config file + dockerConfigJsonLocations := []string{GetPreferredDockercfgPath(), workingDirPath, homeJsonDirPath, rootJsonDirPath} + for _, configPath := range dockerConfigJsonLocations { + absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) + if err != nil { + glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + continue + } + glog.V(4).Infof("looking for .docker/config.json at %s", absDockerConfigFileLocation) + contents, err := ioutil.ReadFile(absDockerConfigFileLocation) + if os.IsNotExist(err) { + continue + } + if err != nil { + glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + continue + } + cfg, err := readDockerConfigJsonFileFromBytes(contents) + if err == nil { + glog.V(4).Infof("found .docker/config.json at %s", absDockerConfigFileLocation) + return cfg, nil + } + } + glog.V(4).Infof("couldn't find valid .docker/config.json after checking in %v", dockerConfigJsonLocations) + + // Can't find latest config file so check for the old one + dockerConfigFileLocations := []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath} + for _, configPath := range dockerConfigFileLocations { + absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) + if err != nil { + glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + continue + } + glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) + contents, err := ioutil.ReadFile(absDockerConfigFileLocation) + if os.IsNotExist(err) { + continue + } + if err != nil { + glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + continue + } + cfg, err := readDockerConfigFileFromBytes(contents) + if err == nil { + glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) + return cfg, nil + } + } + return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", dockerConfigFileLocations) +} + +// HttpError wraps a non-StatusOK error 
code as an error. +type HttpError struct { + StatusCode int + Url string +} + +// Error implements error +func (he *HttpError) Error() string { + return fmt.Sprintf("http status code: %d while fetching url %s", + he.StatusCode, he.Url) +} + +func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if header != nil { + req.Header = *header + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + glog.V(2).Infof("body of failing http response: %v", resp.Body) + return nil, &HttpError{ + StatusCode: resp.StatusCode, + Url: url, + } + } + + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return contents, nil +} + +func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.Header) (cfg DockerConfig, err error) { + if contents, err := ReadUrl(url, client, header); err != nil { + return nil, err + } else { + return readDockerConfigFileFromBytes(contents) + } +} + +func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { + if err = json.Unmarshal(contents, &cfg); err != nil { + glog.Errorf("while trying to parse blob %q: %v", contents, err) + return nil, err + } + return +} + +func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) { + var cfgJson DockerConfigJson + if err = json.Unmarshal(contents, &cfgJson); err != nil { + glog.Errorf("while trying to parse blob %q: %v", contents, err) + return nil, err + } + cfg = cfgJson.Auths + return +} + +// dockerConfigEntryWithAuth is used solely for deserializing the Auth field +// into a dockerConfigEntry during JSON deserialization. +type dockerConfigEntryWithAuth struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Email string `json:"email,omitempty"` + Auth string `json:"auth,omitempty"` +} + +func (ident *DockerConfigEntry) UnmarshalJSON(data []byte) error { + var tmp dockerConfigEntryWithAuth + err := json.Unmarshal(data, &tmp) + if err != nil { + return err + } + + ident.Username = tmp.Username + ident.Password = tmp.Password + ident.Email = tmp.Email + + if len(tmp.Auth) == 0 { + return nil + } + + ident.Username, ident.Password, err = decodeDockerConfigFieldAuth(tmp.Auth) + return err +} + +func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) { + toEncode := dockerConfigEntryWithAuth{ident.Username, ident.Password, ident.Email, ""} + toEncode.Auth = encodeDockerConfigFieldAuth(ident.Username, ident.Password) + + return json.Marshal(toEncode) +} + +// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a +// username and a password. The format of the auth field is base64(:). 
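As a concrete illustration of the config structures above and the auth decoding performed below (registry name and credentials are made up): a ~/.docker/config.json blob stores "auth" as base64 of "username:password", and unmarshalling into DockerConfigJson populates Username and Password through DockerConfigEntry's custom UnmarshalJSON.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/credentialprovider"
)

func main() {
	auth := base64.StdEncoding.EncodeToString([]byte("janedoe:s3cret"))
	blob := fmt.Sprintf(`{"auths": {"registry.example.com": {"auth": %q, "email": "jane@example.com"}}}`, auth)

	var cfgFile credentialprovider.DockerConfigJson
	if err := json.Unmarshal([]byte(blob), &cfgFile); err != nil {
		panic(err)
	}

	entry := cfgFile.Auths["registry.example.com"]
	fmt.Println(entry.Username, entry.Password) // janedoe s3cret, decoded from the "auth" field
}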
+func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { + decoded, err := base64.StdEncoding.DecodeString(field) + if err != nil { + return + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + err = fmt.Errorf("unable to parse auth field") + return + } + + username = parts[0] + password = parts[1] + + return +} + +func encodeDockerConfigFieldAuth(username, password string) string { + fieldValue := username + ":" + password + + return base64.StdEncoding.EncodeToString([]byte(fieldValue)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go new file mode 100644 index 00000000..f071c0c8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package credentialprovider supplies interfaces and implementations for +// docker registry providers to expose their authentication scheme. +package credentialprovider diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go new file mode 100644 index 00000000..eedbee5a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go @@ -0,0 +1,338 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentialprovider + +import ( + "encoding/json" + "net" + "net/url" + "path/filepath" + "sort" + "strings" + + "github.com/golang/glog" + + dockertypes "github.com/docker/engine-api/types" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/util/sets" +) + +// DockerKeyring tracks a set of docker registry credentials, maintaining a +// reverse index across the registry endpoints. A registry endpoint is made +// up of a host (e.g. registry.example.com), but it may also contain a path +// (e.g. 
registry.example.com/foo) This index is important for two reasons: +// - registry endpoints may overlap, and when this happens we must find the +// most specific match for a given image +// - iterating a map does not yield predictable results +type DockerKeyring interface { + Lookup(image string) ([]LazyAuthConfiguration, bool) +} + +// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring +type BasicDockerKeyring struct { + index []string + creds map[string][]LazyAuthConfiguration +} + +// lazyDockerKeyring is an implementation of DockerKeyring that lazily +// materializes its dockercfg based on a set of dockerConfigProviders. +type lazyDockerKeyring struct { + Providers []DockerConfigProvider +} + +// LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its +// binding. If Provider is non-nil, it will be used to obtain new credentials +// by calling LazyProvide() on it. +type LazyAuthConfiguration struct { + dockertypes.AuthConfig + Provider DockerConfigProvider +} + +func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { + return LazyAuthConfiguration{ + AuthConfig: dockertypes.AuthConfig{ + Username: ident.Username, + Password: ident.Password, + Email: ident.Email, + }, + } +} + +func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { + if dk.index == nil { + dk.index = make([]string, 0) + dk.creds = make(map[string][]LazyAuthConfiguration) + } + for loc, ident := range cfg { + + var creds LazyAuthConfiguration + if ident.Provider != nil { + creds = LazyAuthConfiguration{ + Provider: ident.Provider, + } + } else { + creds = DockerConfigEntryToLazyAuthConfiguration(ident) + } + + value := loc + if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") { + value = "https://" + value + } + parsed, err := url.Parse(value) + if err != nil { + glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) + continue + } + + // The docker client allows exact matches: + // foo.bar.com/namespace + // Or hostname matches: + // foo.bar.com + // It also considers /v2/ and /v1/ equivalent to the hostname + // See ResolveAuthConfig in docker/registry/auth.go. + effectivePath := parsed.Path + if strings.HasPrefix(effectivePath, "/v2/") || strings.HasPrefix(effectivePath, "/v1/") { + effectivePath = effectivePath[3:] + } + var key string + if (len(effectivePath) > 0) && (effectivePath != "/") { + key = parsed.Host + effectivePath + } else { + key = parsed.Host + } + dk.creds[key] = append(dk.creds[key], creds) + dk.index = append(dk.index, key) + } + + eliminateDupes := sets.NewString(dk.index...) + dk.index = eliminateDupes.List() + + // Update the index used to identify which credentials to use for a given + // image. The index is reverse-sorted so more specific paths are matched + // first. For example, if for the given image "quay.io/coreos/etcd", + // credentials for "quay.io/coreos" should match before "quay.io". + sort.Sort(sort.Reverse(sort.StringSlice(dk.index))) +} + +const ( + defaultRegistryHost = "index.docker.io" + defaultRegistryKey = defaultRegistryHost + "/v1/" +) + +// isDefaultRegistryMatch determines whether the given image will +// pull from the default registry (DockerHub) based on the +// characteristics of its name. +func isDefaultRegistryMatch(image string) bool { + parts := strings.SplitN(image, "/", 2) + + if len(parts[0]) == 0 { + return false + } + + if len(parts) == 1 { + // e.g. 
library/ubuntu + return true + } + + if parts[0] == "docker.io" || parts[0] == "index.docker.io" { + // resolve docker.io/image and index.docker.io/image as default registry + return true + } + + // From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/ + // Docker looks for either a “.” (domain separator) or “:” (port separator) + // to learn that the first part of the repository name is a location and not + // a user name. + return !strings.ContainsAny(parts[0], ".:") +} + +// url.Parse require a scheme, but ours don't have schemes. Adding a +// scheme to make url.Parse happy, then clear out the resulting scheme. +func parseSchemelessUrl(schemelessUrl string) (*url.URL, error) { + parsed, err := url.Parse("https://" + schemelessUrl) + if err != nil { + return nil, err + } + // clear out the resulting scheme + parsed.Scheme = "" + return parsed, nil +} + +// split the host name into parts, as well as the port +func splitUrl(url *url.URL) (parts []string, port string) { + host, port, err := net.SplitHostPort(url.Host) + if err != nil { + // could not parse port + host, port = url.Host, "" + } + return strings.Split(host, "."), port +} + +// overloaded version of urlsMatch, operating on strings instead of URLs. +func urlsMatchStr(glob string, target string) (bool, error) { + globUrl, err := parseSchemelessUrl(glob) + if err != nil { + return false, err + } + targetUrl, err := parseSchemelessUrl(target) + if err != nil { + return false, err + } + return urlsMatch(globUrl, targetUrl) +} + +// check whether the given target url matches the glob url, which may have +// glob wild cards in the host name. +// +// Examples: +// globUrl=*.docker.io, targetUrl=blah.docker.io => match +// globUrl=*.docker.io, targetUrl=not.right.io => no match +// +// Note that we don't support wildcards in ports and paths yet. +func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { + globUrlParts, globPort := splitUrl(globUrl) + targetUrlParts, targetPort := splitUrl(targetUrl) + if globPort != targetPort { + // port doesn't match + return false, nil + } + if len(globUrlParts) != len(targetUrlParts) { + // host name does not have the same number of parts + return false, nil + } + if !strings.HasPrefix(targetUrl.Path, globUrl.Path) { + // the path of the credential must be a prefix + return false, nil + } + for k, globUrlPart := range globUrlParts { + targetUrlPart := targetUrlParts[k] + matched, err := filepath.Match(globUrlPart, targetUrlPart) + if err != nil { + return false, err + } + if !matched { + // glob mismatch for some part + return false, nil + } + } + // everything matches + return true, nil +} + +// Lookup implements the DockerKeyring method for fetching credentials based on image name. +// Multiple credentials may be returned if there are multiple potentially valid credentials +// available. This allows for rotation. +func (dk *BasicDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { + // range over the index as iterating over a map does not provide a predictable ordering + ret := []LazyAuthConfiguration{} + for _, k := range dk.index { + // both k and image are schemeless URLs because even though schemes are allowed + // in the credential configurations, we remove them in Add. + if matched, _ := urlsMatchStr(k, image); !matched { + continue + } + + ret = append(ret, dk.creds[k]...) 
+ } + + if len(ret) > 0 { + return ret, true + } + + // Use credentials for the default registry if provided, and appropriate + if isDefaultRegistryMatch(image) { + if auth, ok := dk.creds[defaultRegistryHost]; ok { + return auth, true + } + } + + return []LazyAuthConfiguration{}, false +} + +// Lookup implements the DockerKeyring method for fetching credentials +// based on image name. +func (dk *lazyDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { + keyring := &BasicDockerKeyring{} + + for _, p := range dk.Providers { + keyring.Add(p.Provide()) + } + + return keyring.Lookup(image) +} + +type FakeKeyring struct { + auth []LazyAuthConfiguration + ok bool +} + +func (f *FakeKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { + return f.auth, f.ok +} + +// unionDockerKeyring delegates to a set of keyrings. +type unionDockerKeyring struct { + keyrings []DockerKeyring +} + +func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { + authConfigs := []LazyAuthConfiguration{} + for _, subKeyring := range k.keyrings { + if subKeyring == nil { + continue + } + + currAuthResults, _ := subKeyring.Lookup(image) + authConfigs = append(authConfigs, currAuthResults...) + } + + return authConfigs, (len(authConfigs) > 0) +} + +// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do, +// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring. +// If they do not, then the default keyring is returned +func MakeDockerKeyring(passedSecrets []api.Secret, defaultKeyring DockerKeyring) (DockerKeyring, error) { + passedCredentials := []DockerConfig{} + for _, passedSecret := range passedSecrets { + if dockerConfigJsonBytes, dockerConfigJsonExists := passedSecret.Data[api.DockerConfigJsonKey]; (passedSecret.Type == api.SecretTypeDockerConfigJson) && dockerConfigJsonExists && (len(dockerConfigJsonBytes) > 0) { + dockerConfigJson := DockerConfigJson{} + if err := json.Unmarshal(dockerConfigJsonBytes, &dockerConfigJson); err != nil { + return nil, err + } + + passedCredentials = append(passedCredentials, dockerConfigJson.Auths) + } else if dockercfgBytes, dockercfgExists := passedSecret.Data[api.DockerConfigKey]; (passedSecret.Type == api.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) { + dockercfg := DockerConfig{} + if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil { + return nil, err + } + + passedCredentials = append(passedCredentials, dockercfg) + } + } + + if len(passedCredentials) > 0 { + basicKeyring := &BasicDockerKeyring{} + for _, currCredentials := range passedCredentials { + basicKeyring.Add(currCredentials) + } + return &unionDockerKeyring{[]DockerKeyring{basicKeyring, defaultKeyring}}, nil + } + + return defaultKeyring, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go new file mode 100644 index 00000000..a871cc02 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go @@ -0,0 +1,62 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
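A usage sketch for the keyring defined above (registry names and credentials are invented): entries are indexed reverse-sorted, so a more specific registry path such as "quay.io/coreos" is matched before the bare host "quay.io", and images without a registry host fall back to the default registry entry if one exists.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/credentialprovider"
)

func main() {
	keyring := &credentialprovider.BasicDockerKeyring{}
	keyring.Add(credentialprovider.DockerConfig{
		"quay.io/coreos": {Username: "coreos-bot", Password: "hunter2"},
		"quay.io":        {Username: "generic", Password: "hunter2"},
	})

	// The most specific path wins: "quay.io/coreos" is tried before "quay.io",
	// and both sets of credentials are returned for rotation.
	creds, found := keyring.Lookup("quay.io/coreos/etcd:v2.3.7")
	fmt.Println(found, creds[0].Username) // true coreos-bot

	// No credentials for index.docker.io were added, so a DockerHub-style image misses.
	_, found = keyring.Lookup("library/ubuntu")
	fmt.Println(found) // false
}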
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentialprovider + +import ( + "sync" + + "github.com/golang/glog" +) + +// All registered credential providers. +var providersMutex sync.Mutex +var providers = make(map[string]DockerConfigProvider) + +// RegisterCredentialProvider is called by provider implementations on +// initialization to register themselves, like so: +// func init() { +// RegisterCredentialProvider("name", &myProvider{...}) +// } +func RegisterCredentialProvider(name string, provider DockerConfigProvider) { + providersMutex.Lock() + defer providersMutex.Unlock() + _, found := providers[name] + if found { + glog.Fatalf("Credential provider %q was registered twice", name) + } + glog.V(4).Infof("Registered credential provider %q", name) + providers[name] = provider +} + +// NewDockerKeyring creates a DockerKeyring to use for resolving credentials, +// which lazily draws from the set of registered credential providers. +func NewDockerKeyring() DockerKeyring { + keyring := &lazyDockerKeyring{ + Providers: make([]DockerConfigProvider, 0), + } + + // TODO(mattmoor): iterating over the map is non-deterministic. We should + // introduce the notion of priorities for conflict resolution. + for name, provider := range providers { + if provider.Enabled() { + glog.V(4).Infof("Registering credential provider: %v", name) + keyring.Providers = append(keyring.Providers, provider) + } + } + + return keyring +} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go new file mode 100644 index 00000000..21565039 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go @@ -0,0 +1,119 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentialprovider + +import ( + "os" + "reflect" + "sync" + "time" + + dockertypes "github.com/docker/engine-api/types" + "github.com/golang/glog" +) + +// DockerConfigProvider is the interface that registered extensions implement +// to materialize 'dockercfg' credentials. +type DockerConfigProvider interface { + Enabled() bool + Provide() DockerConfig + // LazyProvide() gets called after URL matches have been performed, so the + // location used as the key in DockerConfig would be redundant. 
+ LazyProvide() *DockerConfigEntry +} + +func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { + if creds.Provider != nil { + entry := *creds.Provider.LazyProvide() + return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig + } else { + return creds.AuthConfig + } + +} + +// A DockerConfigProvider that simply reads the .dockercfg file +type defaultDockerConfigProvider struct{} + +// init registers our default provider, which simply reads the .dockercfg file. +func init() { + RegisterCredentialProvider(".dockercfg", + &CachingDockerConfigProvider{ + Provider: &defaultDockerConfigProvider{}, + Lifetime: 5 * time.Minute, + }) +} + +// CachingDockerConfigProvider implements DockerConfigProvider by composing +// with another DockerConfigProvider and caching the DockerConfig it provides +// for a pre-specified lifetime. +type CachingDockerConfigProvider struct { + Provider DockerConfigProvider + Lifetime time.Duration + + // cache fields + cacheDockerConfig DockerConfig + expiration time.Time + mu sync.Mutex +} + +// Enabled implements dockerConfigProvider +func (d *defaultDockerConfigProvider) Enabled() bool { + return true +} + +// Provide implements dockerConfigProvider +func (d *defaultDockerConfigProvider) Provide() DockerConfig { + // Read the standard Docker credentials from .dockercfg + if cfg, err := ReadDockerConfigFile(); err == nil { + return cfg + } else if !os.IsNotExist(err) { + glog.V(4).Infof("Unable to parse Docker config file: %v", err) + } + return DockerConfig{} +} + +// LazyProvide implements dockerConfigProvider. Should never be called. +func (d *defaultDockerConfigProvider) LazyProvide() *DockerConfigEntry { + return nil +} + +// Enabled implements dockerConfigProvider +func (d *CachingDockerConfigProvider) Enabled() bool { + return d.Provider.Enabled() +} + +// LazyProvide implements dockerConfigProvider. Should never be called. +func (d *CachingDockerConfigProvider) LazyProvide() *DockerConfigEntry { + return nil +} + +// Provide implements dockerConfigProvider +func (d *CachingDockerConfigProvider) Provide() DockerConfig { + d.mu.Lock() + defer d.mu.Unlock() + + // If the cache hasn't expired, return our cache + if time.Now().Before(d.expiration) { + return d.cacheDockerConfig + } + + glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) + d.cacheDockerConfig = d.Provider.Provide() + d.expiration = time.Now().Add(d.Lifetime) + return d.cacheDockerConfig +} diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go new file mode 100644 index 00000000..c91ff6ec --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath supplies methods for extracting fields from objects +// given a path to a field. 
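To show how the provider plumbing above fits together, here is a sketch of a custom DockerConfigProvider registered at init time and wrapped in the caching provider; the provider type, registry host, and credentials are hypothetical.

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/credentialprovider"
)

// staticRegistryProvider is a hypothetical provider that always vends one credential.
type staticRegistryProvider struct{}

func (p *staticRegistryProvider) Enabled() bool { return true }

func (p *staticRegistryProvider) Provide() credentialprovider.DockerConfig {
	return credentialprovider.DockerConfig{
		"registry.internal.example.com": {Username: "robot", Password: "token"},
	}
}

// LazyProvide is only consulted for entries whose Provider field is set.
func (p *staticRegistryProvider) LazyProvide() *credentialprovider.DockerConfigEntry { return nil }

func init() {
	// Wrap the provider so Provide() is re-invoked at most every 10 minutes.
	credentialprovider.RegisterCredentialProvider("static-internal-registry",
		&credentialprovider.CachingDockerConfigProvider{
			Provider: &staticRegistryProvider{},
			Lifetime: 10 * time.Minute,
		})
}

func main() {
	// The keyring lazily draws from every enabled registered provider.
	keyring := credentialprovider.NewDockerKeyring()
	creds, found := keyring.Lookup("registry.internal.example.com/team/app:v1")
	_, _ = creds, found
}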
+package fieldpath diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go new file mode 100644 index 00000000..bede9b2c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "math" + "strconv" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/resource" +) + +// formatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + for key, value := range m { + fmtStr += fmt.Sprintf("%v=%q\n", key, value) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +// +// Currently, this API is limited to supporting the fieldpaths: +// +// 1. metadata.name - The name of an API object +// 2. metadata.namespace - The namespace of an API object +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + } + + return "", fmt.Errorf("Unsupported fieldPath: %v", fieldPath) +} + +// ExtractResourceValueByContainerName extracts the value of a resource +// by providing container name +func ExtractResourceValueByContainerName(fs *api.ResourceFieldSelector, pod *api.Pod, containerName string) (string, error) { + container, err := findContainerInPod(pod, containerName) + if err != nil { + return "", err + } + return ExtractContainerResourceValue(fs, container) +} + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// findContainerInPod finds a container by its name in the provided pod +func 
findContainerInPod(pod *api.Pod, containerName string) (*api.Container, error) { + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + return &container, nil + } + } + return nil, fmt.Errorf("container %s not found", containerName) +} + +// convertResourceCPUTOString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS new file mode 100644 index 00000000..60bc436d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS @@ -0,0 +1,8 @@ +assignees: + - bgrant0607 + - brendandburns + - deads2k + - janetkuo + - jlowdermilk + - pwittrock + - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go new file mode 100644 index 00000000..1836fc25 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go @@ -0,0 +1,194 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "encoding/json" + + "k8s.io/kubernetes/pkg/api/annotations" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/runtime" +) + +type debugError interface { + DebugError() (msg string, args []interface{}) +} + +// GetOriginalConfiguration retrieves the original configuration of the object +// from the annotation, or nil if no annotation was found. +func GetOriginalConfiguration(mapping *meta.RESTMapping, obj runtime.Object) ([]byte, error) { + annots, err := mapping.MetadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + return nil, nil + } + + original, ok := annots[annotations.LastAppliedConfigAnnotation] + if !ok { + return nil, nil + } + + return []byte(original), nil +} + +// SetOriginalConfiguration sets the original configuration of the object +// as the annotation on the object for later use in computing a three way patch. 
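+// Passing an empty original is a no-op: the annotation is left unchanged and
+// no error is returned.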
+func SetOriginalConfiguration(info *resource.Info, original []byte) error { + if len(original) < 1 { + return nil + } + + accessor := info.Mapping.MetadataAccessor + annots, err := accessor.Annotations(info.Object) + if err != nil { + return err + } + + if annots == nil { + annots = map[string]string{} + } + + annots[annotations.LastAppliedConfigAnnotation] = string(original) + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { + return err + } + + return nil +} + +// GetModifiedConfiguration retrieves the modified configuration of the object. +// If annotate is true, it embeds the result as an anotation in the modified +// configuration. If an object was read from the command input, it will use that +// version of the object. Otherwise, it will use the version from the server. +func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime.Encoder) ([]byte, error) { + // First serialize the object without the annotation to prevent recursion, + // then add that serialization to it as the annotation and serialize it again. + var modified []byte + if info.VersionedObject != nil { + // If an object was read from input, use that version. + accessor, err := meta.Accessor(info.VersionedObject) + if err != nil { + return nil, err + } + + // Get the current annotations from the object. + annots := accessor.GetAnnotations() + if annots == nil { + annots = map[string]string{} + } + + original := annots[annotations.LastAppliedConfigAnnotation] + delete(annots, annotations.LastAppliedConfigAnnotation) + accessor.SetAnnotations(annots) + // TODO: this needs to be abstracted - there should be no assumption that versioned object + // can be marshalled to JSON. + modified, err = json.Marshal(info.VersionedObject) + if err != nil { + return nil, err + } + + if annotate { + annots[annotations.LastAppliedConfigAnnotation] = string(modified) + accessor.SetAnnotations(annots) + // TODO: this needs to be abstracted - there should be no assumption that versioned object + // can be marshalled to JSON. + modified, err = json.Marshal(info.VersionedObject) + if err != nil { + return nil, err + } + } + + // Restore the object to its original condition. + annots[annotations.LastAppliedConfigAnnotation] = original + accessor.SetAnnotations(annots) + } else { + // Otherwise, use the server side version of the object. + accessor := info.Mapping.MetadataAccessor + // Get the current annotations from the object. + annots, err := accessor.Annotations(info.Object) + if err != nil { + return nil, err + } + + if annots == nil { + annots = map[string]string{} + } + + original := annots[annotations.LastAppliedConfigAnnotation] + delete(annots, annotations.LastAppliedConfigAnnotation) + if err := accessor.SetAnnotations(info.Object, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, info.Object) + if err != nil { + return nil, err + } + + if annotate { + annots[annotations.LastAppliedConfigAnnotation] = string(modified) + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { + return nil, err + } + + modified, err = runtime.Encode(codec, info.Object) + if err != nil { + return nil, err + } + } + + // Restore the object to its original condition. 
+ annots[annotations.LastAppliedConfigAnnotation] = original + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { + return nil, err + } + } + + return modified, nil +} + +// UpdateApplyAnnotation calls CreateApplyAnnotation if the last applied +// configuration annotation is already present. Otherwise, it does nothing. +func UpdateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { + if original, err := GetOriginalConfiguration(info.Mapping, info.Object); err != nil || len(original) <= 0 { + return err + } + return CreateApplyAnnotation(info, codec) +} + +// CreateApplyAnnotation gets the modified configuration of the object, +// without embedding it again, and then sets it on the object as the annotation. +func CreateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { + modified, err := GetModifiedConfiguration(info, false, codec) + if err != nil { + return err + } + return SetOriginalConfiguration(info, modified) +} + +// Create the annotation used by kubectl apply only when createAnnotation is true +// Otherwise, only update the annotation when it already exists +func CreateOrUpdateAnnotation(createAnnotation bool, info *resource.Info, codec runtime.Encoder) error { + if createAnnotation { + return CreateApplyAnnotation(info, codec) + } + return UpdateApplyAnnotation(info, codec) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go new file mode 100644 index 00000000..979a93bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + "strconv" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/runtime" +) + +type HorizontalPodAutoscalerV1Beta1 struct{} + +func (HorizontalPodAutoscalerV1Beta1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"default-name", true}, + {"name", false}, + {"scaleRef-kind", false}, + {"scaleRef-name", false}, + {"scaleRef-apiVersion", false}, + {"scaleRef-subresource", false}, + {"min", false}, + {"max", true}, + {"cpu-percent", false}, + } +} + +func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + return generateHPA(genericParams) +} + +type HorizontalPodAutoscalerV1 struct{} + +func (HorizontalPodAutoscalerV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"default-name", true}, + {"name", false}, + {"scaleRef-kind", false}, + {"scaleRef-name", false}, + {"scaleRef-apiVersion", false}, + {"min", false}, + {"max", true}, + {"cpu-percent", false}, + } +} + +func (HorizontalPodAutoscalerV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + return generateHPA(genericParams) +} + +func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + + name, found := params["name"] + if !found || len(name) == 0 { + name, found = params["default-name"] + if !found || len(name) == 0 { + return nil, fmt.Errorf("'name' is a required parameter.") + } + } + minString, found := params["min"] + min := -1 + var err error + if found { + if min, err = strconv.Atoi(minString); err != nil { + return nil, err + } + } + maxString, found := params["max"] + if !found { + return nil, fmt.Errorf("'max' is a required parameter.") + } + max, err := strconv.Atoi(maxString) + if err != nil { + return nil, err + } + cpuString, found := params["cpu-percent"] + cpu := -1 + if found { + if cpu, err = strconv.Atoi(cpuString); err != nil { + return nil, err + } + } + + scaler := autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: params["scaleRef-kind"], + Name: params["scaleRef-name"], + APIVersion: params["scaleRef-apiVersion"], + }, + MaxReplicas: int32(max), + }, + } + if min > 0 { + v := int32(min) + scaler.Spec.MinReplicas = &v + } + if cpu >= 0 { + c := int32(cpu) + scaler.Spec.TargetCPUUtilizationPercentage = &c + } + return &scaler, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go new file mode 100644 index 00000000..e3eaf30e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go @@ -0,0 +1,36 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. + +package kubectl + +import ( + "strings" + + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/resource" +) + +func AddJsonFilenameFlag(cmd *cobra.Command, value *[]string, usage string) { + cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + cmd.Flags().SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go new file mode 100644 index 00000000..43ddf3e9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go @@ -0,0 +1,168 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/restclient" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" +) + +func NewClientCache(loader clientcmd.ClientConfig) *ClientCache { + return &ClientCache{ + clients: make(map[unversioned.GroupVersion]*client.Client), + configs: make(map[unversioned.GroupVersion]*restclient.Config), + fedClientSets: make(map[unversioned.GroupVersion]fed_clientset.Interface), + loader: loader, + } +} + +// ClientCache caches previously loaded clients for reuse, and ensures MatchServerVersion +// is invoked only once +type ClientCache struct { + loader clientcmd.ClientConfig + clients map[unversioned.GroupVersion]*client.Client + fedClientSets map[unversioned.GroupVersion]fed_clientset.Interface + configs map[unversioned.GroupVersion]*restclient.Config + defaultConfig *restclient.Config + defaultClient *client.Client + matchVersion bool +} + +// ClientConfigForVersion returns the correct config for a server +func (c *ClientCache) ClientConfigForVersion(version *unversioned.GroupVersion) (*restclient.Config, error) { + if c.defaultConfig == nil { + config, err := c.loader.ClientConfig() + if err != nil { + return nil, err + } + c.defaultConfig = config + if c.matchVersion { + if err := client.MatchesServerVersion(c.defaultClient, config); err != nil { + return nil, err + } + } + } + if version != nil { + if config, ok := c.configs[*version]; ok { + return config, nil + } + } + + // TODO: have a better config copy method + config := *c.defaultConfig + + // TODO these fall out when we finish the refactor + var preferredGV *unversioned.GroupVersion + if version != nil { + versionCopy := *version + preferredGV = &versionCopy + } + + 
negotiatedVersion, err := client.NegotiateVersion(c.defaultClient, &config, preferredGV, registered.EnabledVersions()) + if err != nil { + return nil, err + } + config.GroupVersion = negotiatedVersion + client.SetKubernetesDefaults(&config) + + if version != nil { + c.configs[*version] = &config + } + + // `version` does not necessarily equal `config.Version`. However, we know that we call this method again with + // `config.Version`, we should get the the config we've just built. + configCopy := config + c.configs[*config.GroupVersion] = &configCopy + + return &config, nil +} + +// ClientForVersion initializes or reuses a client for the specified version, or returns an +// error if that is not possible +func (c *ClientCache) ClientForVersion(version *unversioned.GroupVersion) (*client.Client, error) { + if version != nil { + if client, ok := c.clients[*version]; ok { + return client, nil + } + } + config, err := c.ClientConfigForVersion(version) + if err != nil { + return nil, err + } + + kubeclient, err := client.New(config) + if err != nil { + return nil, err + } + c.clients[*config.GroupVersion] = kubeclient + + // `version` does not necessarily equal `config.Version`. However, we know that if we call this method again with + // `version`, we should get a client based on the same config we just found. There's no guarantee that a client + // is copiable, so create a new client and save it in the cache. + if version != nil { + configCopy := *config + kubeclient, err := client.New(&configCopy) + if err != nil { + return nil, err + } + c.clients[*version] = kubeclient + } + + return kubeclient, nil +} + +func (c *ClientCache) FederationClientSetForVersion(version *unversioned.GroupVersion) (fed_clientset.Interface, error) { + if version != nil { + if clientSet, found := c.fedClientSets[*version]; found { + return clientSet, nil + } + } + config, err := c.ClientConfigForVersion(version) + if err != nil { + return nil, err + } + + // TODO: support multi versions of client with clientset + clientSet, err := fed_clientset.NewForConfig(config) + if err != nil { + return nil, err + } + c.fedClientSets[*config.GroupVersion] = clientSet + + if version != nil { + configCopy := *config + clientSet, err := fed_clientset.NewForConfig(&configCopy) + if err != nil { + return nil, err + } + c.fedClientSets[*version] = clientSet + } + + return clientSet, nil +} + +func (c *ClientCache) FederationClientForVersion(version *unversioned.GroupVersion) (*restclient.RESTClient, error) { + fedClientSet, err := c.FederationClientSetForVersion(version) + if err != nil { + return nil, err + } + return fedClientSet.(*fed_clientset.Clientset).FederationClient.RESTClient, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go new file mode 100644 index 00000000..abebe31d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -0,0 +1,1230 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "os/user" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/emicklei/go-restful/swagger" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + apierrors "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/client/restclient" + client "k8s.io/kubernetes/pkg/client/unversioned" + clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/runtime/serializer/json" + utilflag "k8s.io/kubernetes/pkg/util/flag" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + FlagMatchBinaryVersion = "match-server-version" +) + +// Factory provides abstractions that allow the Kubectl command to be extended across multiple types +// of resources and different API sets. +// TODO: make the functions interfaces +// TODO: pass the various interfaces on the factory directly into the command constructors (so the +// commands are decoupled from the factory). +type Factory struct { + clients *ClientCache + flags *pflag.FlagSet + + // Returns interfaces for dealing with arbitrary runtime.Objects. If thirdPartyDiscovery is true, performs API calls + // to discovery dynamic API objects registered by third parties. + Object func(thirdPartyDiscovery bool) (meta.RESTMapper, runtime.ObjectTyper) + // Returns interfaces for decoding objects - if toInternal is set, decoded objects will be converted + // into their internal form (if possible). Eventually the internal form will be removed as an option, + // and only versioned objects will be returned. + Decoder func(toInternal bool) runtime.Decoder + // Returns an encoder capable of encoding a provided object into JSON in the default desired version. + JSONEncoder func() runtime.Encoder + // Returns a client for accessing Kubernetes resources or an error. + Client func() (*client.Client, error) + // Returns a client.Config for accessing the Kubernetes server. + ClientConfig func() (*restclient.Config, error) + // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended + // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. + ClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + // Returns a Describer for displaying the specified RESTMapping type or an error. + Describer func(mapping *meta.RESTMapping) (kubectl.Describer, error) + // Returns a Printer for formatting objects of the given type or an error. 
+ Printer func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) + // Returns a Scaler for changing the size of the specified RESTMapping type or an error + Scaler func(mapping *meta.RESTMapping) (kubectl.Scaler, error) + // Returns a Reaper for gracefully shutting down resources. + Reaper func(mapping *meta.RESTMapping) (kubectl.Reaper, error) + // Returns a HistoryViewer for viewing change history + HistoryViewer func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) + // Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error + Rollbacker func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) + // Returns a StatusViewer for printing rollout status. + StatusViewer func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) + // MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a + // new set-based selector is provided, an error is returned if the selector cannot be converted to a + // map-based selector + MapBasedSelectorForObject func(object runtime.Object) (string, error) + // PortsForObject returns the ports associated with the provided object + PortsForObject func(object runtime.Object) ([]string, error) + // ProtocolsForObject returns the mapping associated with the provided object + ProtocolsForObject func(object runtime.Object) (map[string]string, error) + // LabelsForObject returns the labels associated with the provided object + LabelsForObject func(object runtime.Object) (map[string]string, error) + // LogsForObject returns a request for the logs associated with the provided object + LogsForObject func(object, options runtime.Object) (*restclient.Request, error) + // PauseObject marks the provided object as paused ie. it will not be reconciled by its controller. + PauseObject func(object runtime.Object) (bool, error) + // ResumeObject resumes a paused object ie. it will be reconciled by its controller. + ResumeObject func(object runtime.Object) (bool, error) + // Returns a schema that can validate objects stored on disk. + Validator func(validate bool, cacheDir string) (validation.Schema, error) + // SwaggerSchema returns the schema declaration for the provided group version kind. + SwaggerSchema func(unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) + // Returns the default namespace to use in cases where no + // other namespace is specified and whether the namespace was + // overridden. + DefaultNamespace func() (string, bool, error) + // Generators returns the generators for the provided command + Generators func(cmdName string) map[string]kubectl.Generator + // Check whether the kind of resources could be exposed + CanBeExposed func(kind unversioned.GroupKind) error + // Check whether the kind of resources could be autoscaled + CanBeAutoscaled func(kind unversioned.GroupKind) error + // AttachablePodForObject returns the pod to which to attach given an object. + AttachablePodForObject func(object runtime.Object) (*api.Pod, error) + // UpdatePodSpecForObject will call the provided function on the pod spec this object supports, + // return false if no pod spec is supported, or return an error. 
+ UpdatePodSpecForObject func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) + // EditorEnvs returns a group of environment variables that the edit command + // can range over in order to determine if the user has specified an editor + // of their choice. + EditorEnvs func() []string + // PrintObjectSpecificMessage prints object-specific messages on the provided writer + PrintObjectSpecificMessage func(obj runtime.Object, out io.Writer) +} + +const ( + RunV1GeneratorName = "run/v1" + RunPodV1GeneratorName = "run-pod/v1" + ServiceV1GeneratorName = "service/v1" + ServiceV2GeneratorName = "service/v2" + ServiceAccountV1GeneratorName = "serviceaccount/v1" + HorizontalPodAutoscalerV1Beta1GeneratorName = "horizontalpodautoscaler/v1beta1" + HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" + DeploymentV1Beta1GeneratorName = "deployment/v1beta1" + JobV1Beta1GeneratorName = "job/v1beta1" + JobV1GeneratorName = "job/v1" + NamespaceV1GeneratorName = "namespace/v1" + SecretV1GeneratorName = "secret/v1" + SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" + SecretForTLSV1GeneratorName = "secret-for-tls/v1" + ConfigMapV1GeneratorName = "configmap/v1" +) + +// DefaultGenerators returns the set of default generators for use in Factory instances +func DefaultGenerators(cmdName string) map[string]kubectl.Generator { + generators := map[string]map[string]kubectl.Generator{} + generators["expose"] = map[string]kubectl.Generator{ + ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, + ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, + } + generators["run"] = map[string]kubectl.Generator{ + RunV1GeneratorName: kubectl.BasicReplicationController{}, + RunPodV1GeneratorName: kubectl.BasicPod{}, + DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, + JobV1Beta1GeneratorName: kubectl.JobV1Beta1{}, + JobV1GeneratorName: kubectl.JobV1{}, + } + generators["autoscale"] = map[string]kubectl.Generator{ + HorizontalPodAutoscalerV1Beta1GeneratorName: kubectl.HorizontalPodAutoscalerV1Beta1{}, + HorizontalPodAutoscalerV1GeneratorName: kubectl.HorizontalPodAutoscalerV1{}, + } + generators["namespace"] = map[string]kubectl.Generator{ + NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, + } + generators["secret"] = map[string]kubectl.Generator{ + SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, + } + generators["secret-for-docker-registry"] = map[string]kubectl.Generator{ + SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, + } + generators["secret-for-tls"] = map[string]kubectl.Generator{ + SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, + } + + return generators[cmdName] +} + +func getGroupVersionKinds(gvks []unversioned.GroupVersionKind, group string) []unversioned.GroupVersionKind { + result := []unversioned.GroupVersionKind{} + for ix := range gvks { + if gvks[ix].Group == group { + result = append(result, gvks[ix]) + } + } + return result +} + +func makeInterfacesFor(versionList []unversioned.GroupVersion) func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + accessor := meta.NewAccessor() + return func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + for ix := range versionList { + if versionList[ix].String() == version.String() { + return &meta.VersionInterfaces{ + ObjectConvertor: thirdpartyresourcedata.NewThirdPartyObjectConverter(api.Scheme), + MetadataAccessor: accessor, + }, nil + } + } + return nil, fmt.Errorf("unsupported storage version: 
%s (valid: %v)", version, versionList) + } +} + +// NewFactory creates a factory with the default Kubernetes resources defined +// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. +// if optionalClientConfig is not nil, then this factory will make use of it. +func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { + mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} + + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags + + clientConfig := optionalClientConfig + if optionalClientConfig == nil { + clientConfig = DefaultClientConfig(flags) + } + + clients := NewClientCache(clientConfig) + + return &Factory{ + clients: clients, + flags: flags, + + // If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that + // have been dynamically added to the apiserver + Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) { + cfg, err := clientConfig.ClientConfig() + CheckErr(err) + cmdApiVersion := unversioned.GroupVersion{} + if cfg.GroupVersion != nil { + cmdApiVersion = *cfg.GroupVersion + } + if discoverDynamicAPIs { + client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"}) + CheckErr(err) + + var versions []unversioned.GroupVersion + var gvks []unversioned.GroupVersionKind + retries := 3 + for i := 0; i < retries; i++ { + versions, gvks, err = GetThirdPartyGroupVersions(client.Discovery()) + // Retry if we got a NotFound error, because user may delete + // a thirdparty group when the GetThirdPartyGroupVersions is + // running. + if err == nil || !apierrors.IsNotFound(err) { + break + } + } + CheckErr(err) + if len(versions) > 0 { + priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper) + if !ok { + CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper)) + return nil, nil + } + multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper) + if !ok { + CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper)) + return nil, nil + } + groupsMap := map[string][]unversioned.GroupVersion{} + for _, version := range versions { + groupsMap[version.Group] = append(groupsMap[version.Group], version) + } + for group, versionList := range groupsMap { + preferredExternalVersion := versionList[0] + + thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group)) + CheckErr(err) + accessor := meta.NewAccessor() + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: versionList, + RESTMapper: thirdPartyMapper, + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: makeInterfacesFor(versionList), + } + + CheckErr(registered.RegisterGroup(groupMeta)) + registered.AddThirdPartyAPIGroupVersions(versionList...) + multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...) 
+ } + priorityMapper.Delegate = multiMapper + // Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we + // don't re-assign, the above assignement won't actually update mapper.RESTMapper + mapper.RESTMapper = priorityMapper + } + } + outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} + priorityRESTMapper := meta.PriorityRESTMapper{ + Delegate: outputRESTMapper, + ResourcePriority: []unversioned.GroupVersionResource{ + {Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, + {Group: autoscaling.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, + {Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, + {Group: federation.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, + }, + KindPriority: []unversioned.GroupVersionKind{ + {Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, + {Group: autoscaling.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, + {Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, + {Group: federation.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, + }, + } + return priorityRESTMapper, api.Scheme + }, + Client: func() (*client.Client, error) { + return clients.ClientForVersion(nil) + }, + ClientConfig: func() (*restclient.Config, error) { + return clients.ClientConfigForVersion(nil) + }, + ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { + gvk := mapping.GroupVersionKind + mappingVersion := mapping.GroupVersionKind.GroupVersion() + c, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + switch gvk.Group { + case api.GroupName: + return c.RESTClient, nil + case autoscaling.GroupName: + return c.AutoscalingClient.RESTClient, nil + case batch.GroupName: + return c.BatchClient.RESTClient, nil + case policy.GroupName: + return c.PolicyClient.RESTClient, nil + case apps.GroupName: + return c.AppsClient.RESTClient, nil + case extensions.GroupName: + return c.ExtensionsClient.RESTClient, nil + case api.SchemeGroupVersion.Group: + return c.RESTClient, nil + case extensions.SchemeGroupVersion.Group: + return c.ExtensionsClient.RESTClient, nil + case federation.GroupName: + return clients.FederationClientForVersion(&mappingVersion) + case rbac.GroupName: + return c.RbacClient.RESTClient, nil + default: + if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { + return nil, fmt.Errorf("unknown api group/version: %s", gvk.String()) + } + cfg, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + gv := gvk.GroupVersion() + cfg.GroupVersion = &gv + cfg.APIPath = "/apis" + cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk) + cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv) + return restclient.RESTClientFor(cfg) + } + }, + Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + if mapping.GroupVersionKind.Group == federation.GroupName { + fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion) + if err != nil { + return nil, err + } + if mapping.GroupVersionKind.Kind == "Cluster" { + return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil + } + } + client, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + if 
describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { + return describer, nil + } + return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) + }, + Decoder: func(toInternal bool) runtime.Decoder { + var decoder runtime.Decoder + if toInternal { + decoder = api.Codecs.UniversalDecoder() + } else { + decoder = api.Codecs.UniversalDeserializer() + } + return thirdpartyresourcedata.NewDecoder(decoder, "") + + }, + JSONEncoder: func() runtime.Encoder { + return api.Codecs.LegacyCodec(registered.EnabledVersions()...) + }, + Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { + return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil + }, + MapBasedSelectorForObject: func(object runtime.Object) (string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return kubectl.MakeLabels(t.Spec.Selector), nil + case *api.Pod: + if len(t.Labels) == 0 { + return "", fmt.Errorf("the pod has no labels and cannot be exposed") + } + return kubectl.MakeLabels(t.Labels), nil + case *api.Service: + if t.Spec.Selector == nil { + return "", fmt.Errorf("the service has no pod selector set") + } + return kubectl.MakeLabels(t.Spec.Selector), nil + case *extensions.Deployment: + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + case *extensions.ReplicaSet: + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. 
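+			// Set-based match expressions cannot be represented as a plain
+			// label map, so they are rejected rather than silently dropped.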
+ if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return "", err + } + return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) + } + }, + PortsForObject: func(object runtime.Object) ([]string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return getPorts(t.Spec.Template.Spec), nil + case *api.Pod: + return getPorts(t.Spec), nil + case *api.Service: + return getServicePorts(t.Spec), nil + case *extensions.Deployment: + return getPorts(t.Spec.Template.Spec), nil + case *extensions.ReplicaSet: + return getPorts(t.Spec.Template.Spec), nil + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) + } + }, + ProtocolsForObject: func(object runtime.Object) (map[string]string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return getProtocols(t.Spec.Template.Spec), nil + case *api.Pod: + return getProtocols(t.Spec), nil + case *api.Service: + return getServiceProtocols(t.Spec), nil + case *extensions.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *extensions.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) + } + }, + LabelsForObject: func(object runtime.Object) (map[string]string, error) { + return meta.NewAccessor().Labels(object) + }, + LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { + c, err := clients.ClientForVersion(nil) + if err != nil { + return nil, err + } + + switch t := object.(type) { + case *api.Pod: + opts, ok := options.(*api.PodLogOptions) + if !ok { + return nil, errors.New("provided options object is not a PodLogOptions") + } + return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil + + case *api.ReplicationController: + opts, ok := options.(*api.PodLogOptions) + if !ok { + return nil, errors.New("provided options object is not a PodLogOptions") + } + selector := labels.SelectorFromSet(t.Spec.Selector) + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) + if err != nil { + return nil, err + } + if numPods > 1 { + fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) + } + + return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil + + case *extensions.ReplicaSet: + opts, ok := options.(*api.PodLogOptions) + if !ok { + return nil, errors.New("provided options object is not a PodLogOptions") + } + selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) } + pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) + if err != nil { + return nil, err + } + if 
numPods > 1 { + fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) + } + + return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil + + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) + } + }, + PauseObject: func(object runtime.Object) (bool, error) { + c, err := clients.ClientForVersion(nil) + if err != nil { + return false, err + } + + switch t := object.(type) { + case *extensions.Deployment: + if t.Spec.Paused { + return true, nil + } + t.Spec.Paused = true + _, err := c.Extensions().Deployments(t.Namespace).Update(t) + return false, err + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return false, err + } + return false, fmt.Errorf("cannot pause %v", gvks[0]) + } + }, + ResumeObject: func(object runtime.Object) (bool, error) { + c, err := clients.ClientForVersion(nil) + if err != nil { + return false, err + } + + switch t := object.(type) { + case *extensions.Deployment: + if !t.Spec.Paused { + return true, nil + } + t.Spec.Paused = false + _, err := c.Extensions().Deployments(t.Namespace).Update(t) + return false, err + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return false, err + } + return false, fmt.Errorf("cannot resume %v", gvks[0]) + } + }, + Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + client, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) + }, + Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + client, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) + }, + HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + client, err := clients.ClientForVersion(&mappingVersion) + clientset := clientset.FromUnversionedClient(client) + if err != nil { + return nil, err + } + return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) + }, + Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + client, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client) + }, + StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { + mappingVersion := mapping.GroupVersionKind.GroupVersion() + client, err := clients.ClientForVersion(&mappingVersion) + if err != nil { + return nil, err + } + return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client) + }, + Validator: func(validate bool, cacheDir string) (validation.Schema, error) { + if validate { + client, err := clients.ClientForVersion(nil) + if err != nil { + return nil, err + } + dir := cacheDir + if len(dir) > 0 { + version, err := client.ServerVersion() + if err != nil { + return nil, err + } + dir = path.Join(cacheDir, version.String()) + } + fedClient, err := clients.FederationClientForVersion(nil) + if err != nil { + return nil, err + } + return &clientSwaggerSchema{ + c: client, + fedc: fedClient, + cacheDir: dir, + mapper: 
api.RESTMapper, + }, nil + } + return validation.NullSchema{}, nil + }, + SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { + version := gvk.GroupVersion() + client, err := clients.ClientForVersion(&version) + if err != nil { + return nil, err + } + return client.Discovery().SwaggerSchema(version) + }, + DefaultNamespace: func() (string, bool, error) { + return clientConfig.Namespace() + }, + Generators: func(cmdName string) map[string]kubectl.Generator { + return DefaultGenerators(cmdName) + }, + CanBeExposed: func(kind unversioned.GroupKind) error { + switch kind { + case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): + // nothing to do here + default: + return fmt.Errorf("cannot expose a %s", kind) + } + return nil + }, + CanBeAutoscaled: func(kind unversioned.GroupKind) error { + switch kind { + case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): + // nothing to do here + default: + return fmt.Errorf("cannot autoscale a %v", kind) + } + return nil + }, + AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { + client, err := clients.ClientForVersion(nil) + if err != nil { + return nil, err + } + switch t := object.(type) { + case *api.ReplicationController: + selector := labels.SelectorFromSet(t.Spec.Selector) + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) + return pod, err + case *extensions.Deployment: + selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) + return pod, err + case *batch.Job: + selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) + return pod, err + case *api.Pod: + return t, nil + default: + gvks, _, err := api.Scheme.ObjectKinds(object) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) + } + }, + // UpdatePodSpecForObject update the pod specification for the provided object + UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { + // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) + switch t := obj.(type) { + case *api.Pod: + return true, fn(&t.Spec) + case *api.ReplicationController: + if t.Spec.Template == nil { + t.Spec.Template = &api.PodTemplateSpec{} + } + return true, fn(&t.Spec.Template.Spec) + case *extensions.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *extensions.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *extensions.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + case *apps.PetSet: + return true, fn(&t.Spec.Template.Spec) + case *batch.Job: + return true, fn(&t.Spec.Template.Spec) + default: + return false, fmt.Errorf("the object is not a pod or does not have a pod template") + } 
+ }, + EditorEnvs: func() []string { + return []string{"KUBE_EDITOR", "EDITOR"} + }, + PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { + switch obj := obj.(type) { + case *api.Service: + if obj.Spec.Type == api.ServiceTypeNodePort { + msg := fmt.Sprintf( + `You have exposed your service on an external port on all nodes in your +cluster. If you want to expose this service to the external internet, you may +need to set up firewall rules for the service port(s) (%s) to serve traffic. + +See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details. +`, + makePortsString(obj.Spec.Ports, true)) + out.Write([]byte(msg)) + } + + if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { + msg := fmt.Sprintf( + `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. +It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. +Please use the loadBalancerSourceRanges field instead. + +See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details. +`) + out.Write([]byte(msg)) + } + } + }, + } +} + +// GetFirstPod returns a pod matching the namespace and label selector +// and the number of all pods that match the label selector. +func GetFirstPod(client client.PodsNamespacer, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) { + options := api.ListOptions{LabelSelector: selector} + + podList, err := client.Pods(namespace).List(options) + if err != nil { + return nil, 0, err + } + pods := []*api.Pod{} + for i := range podList.Items { + pod := podList.Items[i] + pods = append(pods, &pod) + } + if len(pods) > 0 { + sort.Sort(sortBy(pods)) + return pods[0], len(podList.Items), nil + } + + // Watch until we observe a pod + options.ResourceVersion = podList.ResourceVersion + w, err := client.Pods(namespace).Watch(options) + if err != nil { + return nil, 0, err + } + defer w.Stop() + + condition := func(event watch.Event) (bool, error) { + return event.Type == watch.Added || event.Type == watch.Modified, nil + } + event, err := watch.Until(timeout, w, condition) + if err != nil { + return nil, 0, err + } + pod, ok := event.Object.(*api.Pod) + if !ok { + return nil, 0, fmt.Errorf("%#v is not a pod event", event) + } + return pod, 1, nil +} + +// Command will stringify and return all environment arguments ie. a command run by a client +// using the factory. +// TODO: We need to filter out stuff like secrets. +func (f *Factory) Command() string { + if len(os.Args) == 0 { + return "" + } + base := filepath.Base(os.Args[0]) + args := append([]string{base}, os.Args[1:]...) + return strings.Join(args, " ") +} + +// BindFlags adds any flags that are common to all kubectl sub commands. +func (f *Factory) BindFlags(flags *pflag.FlagSet) { + // Merge factory's flags + flags.AddFlagSet(f.flags) + + // Globally persistent flags across all subcommands. + // TODO Change flag names to consts to allow safer lookup from subcommands. + // TODO Add a verbose flag that turns on glog logging. Probably need a way + // to do that automatically for every subcommand. + flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version") + + // Normalize all flags that are coming from other packages or pre-configurations + // a.k.a. change all "_" to "-". e.g. 
glog package + flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) +} + +// BindCommonFlags adds any flags defined by external projects (not part of pflags) +func (f *Factory) BindExternalFlags(flags *pflag.FlagSet) { + // any flags defined by external projects (not part of pflags) + flags.AddGoFlagSet(flag.CommandLine) +} + +func makePortsString(ports []api.ServicePort, useNodePort bool) string { + pieces := make([]string, len(ports)) + for ix := range ports { + var port int32 + if useNodePort { + port = ports[ix].NodePort + } else { + port = ports[ix].Port + } + pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) + } + return strings.Join(pieces, ",") +} + +func getPorts(spec api.PodSpec) []string { + result := []string{} + for _, container := range spec.Containers { + for _, port := range container.Ports { + result = append(result, strconv.Itoa(int(port.ContainerPort))) + } + } + return result +} + +func getProtocols(spec api.PodSpec) map[string]string { + result := make(map[string]string) + for _, container := range spec.Containers { + for _, port := range container.Ports { + result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) + } + } + return result +} + +// Extracts the ports exposed by a service from the given service spec. +func getServicePorts(spec api.ServiceSpec) []string { + result := []string{} + for _, servicePort := range spec.Ports { + result = append(result, strconv.Itoa(int(servicePort.Port))) + } + return result +} + +// Extracts the protocols exposed by a service from the given service spec. +func getServiceProtocols(spec api.ServiceSpec) map[string]string { + result := make(map[string]string) + for _, servicePort := range spec.Ports { + result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) + } + return result +} + +type clientSwaggerSchema struct { + c *client.Client + fedc *restclient.RESTClient + cacheDir string + mapper meta.RESTMapper +} + +const schemaFileName = "schema.json" + +type schemaClient interface { + Get() *restclient.Request +} + +func recursiveSplit(dir string) []string { + parent, file := path.Split(dir) + if len(parent) == 0 { + return []string{file} + } + return append(recursiveSplit(parent[:len(parent)-1]), file) +} + +func substituteUserHome(dir string) (string, error) { + if len(dir) == 0 || dir[0] != '~' { + return dir, nil + } + parts := recursiveSplit(dir) + if len(parts[0]) == 1 { + parts[0] = os.Getenv("HOME") + } else { + usr, err := user.Lookup(parts[0][1:]) + if err != nil { + return "", err + } + parts[0] = usr.HomeDir + } + return path.Join(parts...), nil +} + +func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersion string) error { + if err := os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil { + return err + } + tmpFile, err := ioutil.TempFile(cacheDir, "schema") + if err != nil { + // If we can't write, keep going. + if os.IsPermission(err) { + return nil + } + return err + } + if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil { + return err + } + if err := os.Link(tmpFile.Name(), cacheFile); err != nil { + // If we can't write due to file existing, or permission problems, keep going. 
+ if os.IsExist(err) || os.IsPermission(err) { + return nil + } + return err + } + return nil +} + +func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string, delegate validation.Schema) (err error) { + var schemaData []byte + var firstSeen bool + fullDir, err := substituteUserHome(cacheDir) + if err != nil { + return err + } + cacheFile := path.Join(fullDir, prefix, groupVersion, schemaFileName) + + if len(cacheDir) != 0 { + if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { + return err + } + } + if schemaData == nil { + firstSeen = true + schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) + if err != nil { + return err + } + } + schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) + if err != nil { + return err + } + err = schema.ValidateBytes(data) + if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen { + // As a temporay hack, kubectl would re-get the schema if validation + // fails for type not found reason. + // TODO: runtime-config settings needs to make into the file's name + schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) + if err != nil { + return err + } + schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) + if err != nil { + return err + } + return schema.ValidateBytes(data) + } + + return err +} + +// Download swagger schema from apiserver and store it to file. +func downloadSchemaAndStore(c schemaClient, cacheDir, fullDir, cacheFile, prefix, groupVersion string) (schemaData []byte, err error) { + schemaData, err = c.Get(). + AbsPath("/swaggerapi", prefix, groupVersion). + Do(). + Raw() + if err != nil { + return + } + if len(cacheDir) != 0 { + if err = writeSchemaFile(schemaData, fullDir, cacheFile, prefix, groupVersion); err != nil { + return + } + } + return +} + +func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { + gvk, err := json.DefaultMetaFactory.Interpret(data) + if err != nil { + return err + } + if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok { + return fmt.Errorf("API version %q isn't supported, only supports API versions %q", gvk.GroupVersion().String(), registered.EnabledVersions()) + } + if gvk.Group == autoscaling.GroupName { + if c.c.AutoscalingClient == nil { + return errors.New("unable to validate: no autoscaling client") + } + return getSchemaAndValidate(c.c.AutoscalingClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == policy.GroupName { + if c.c.PolicyClient == nil { + return errors.New("unable to validate: no policy client") + } + return getSchemaAndValidate(c.c.PolicyClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == apps.GroupName { + if c.c.AppsClient == nil { + return errors.New("unable to validate: no autoscaling client") + } + return getSchemaAndValidate(c.c.AppsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + + if gvk.Group == batch.GroupName { + if c.c.BatchClient == nil { + return errors.New("unable to validate: no batch client") + } + return getSchemaAndValidate(c.c.BatchClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == rbac.GroupName { + if c.c.RbacClient == nil { + return errors.New("unable to validate: no rbac client") + } + return getSchemaAndValidate(c.c.RbacClient.RESTClient, data, "apis/", 
gvk.GroupVersion().String(), c.cacheDir, c) + } + if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { + // Don't attempt to validate third party objects + return nil + } + if gvk.Group == extensions.GroupName { + if c.c.ExtensionsClient == nil { + return errors.New("unable to validate: no experimental client") + } + return getSchemaAndValidate(c.c.ExtensionsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + if gvk.Group == federation.GroupName { + if c.fedc == nil { + return errors.New("unable to validate: no federation client") + } + return getSchemaAndValidate(c.fedc, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) + } + return getSchemaAndValidate(c.c.RESTClient, data, "api", gvk.GroupVersion().String(), c.cacheDir, c) +} + +// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy: +// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me. +// 1. Merge together the kubeconfig itself. This is done with the following hierarchy rules: +// 1. CommandLineLocation - this parsed from the command line, so it must be late bound. If you specify this, +// then no other kubeconfig files are merged. This file must exist. +// 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged. +// 3. HomeDirectoryLocation +// Empty filenames are ignored. Files with non-deserializable content produced errors. +// The first file to set a particular value or map key wins and the value or map key is never changed. +// This means that the first file to set CurrentContext will have its context preserved. It also means +// that if two files specify a "red-user", only values from the first file's red-user are used. Even +// non-conflicting entries from the second file's "red-user" are discarded. +// 2. Determine the context to use based on the first hit in this chain +// 1. command line argument - again, parsed from the command line, so it must be late bound +// 2. CurrentContext from the merged kubeconfig file +// 3. Empty is allowed at this stage +// 3. Determine the cluster info and auth info to use. At this point, we may or may not have a context. They +// are built based on the first hit in this chain. (run it twice, once for auth, once for cluster) +// 1. command line argument +// 2. If context is present, then use the context value +// 3. Empty is allowed +// 4. Determine the actual cluster info to use. At this point, we may or may not have a cluster info. Build +// each piece of the cluster info based on the chain: +// 1. command line argument +// 2. If cluster info is present and a value for the attribute is present, use it. +// 3. If you don't have a server location, bail. +// 5. Auth info is build using the same rules as cluster info, EXCEPT that you can only have one authentication +// technique per auth info. The following conditions result in an error: +// 1. If there are two conflicting techniques specified from the command line, fail. +// 2. If the command line does not specify one, and the auth info has conflicting techniques, fail. +// 3. If the command line specifies one and the auth info specifies another, honor the command line technique. +// 2. Use default values and potentially prompt for auth information +// +// However, if it appears that we're running in a kubernetes cluster +// container environment, then run with the auth info kubernetes mounted for +// us. 
Specifically: +// The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are +// set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token +// exists and is not a directory. +func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") + + overrides := &clientcmd.ConfigOverrides{} + flagNames := clientcmd.RecommendedConfigOverrideFlags("") + // short flagnames are disabled by default. These are here for compatibility with existing scripts + flagNames.ClusterOverrideFlags.APIServer.ShortName = "s" + + clientcmd.BindOverrideFlags(overrides, flags, flagNames) + clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin) + + return clientConfig +} + +// PrintObject prints an api object given command line flags to modify the output format +func (f *Factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { + gvks, _, err := api.Scheme.ObjectKinds(obj) + if err != nil { + return err + } + + mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) + if err != nil { + return err + } + + printer, err := f.PrinterForMapping(cmd, mapping, false) + if err != nil { + return err + } + return printer.PrintObj(obj, out) +} + +// PrinterForMapping returns a printer suitable for displaying the provided resource type. +// Requires that printer flags have been added to cmd (see AddPrinterFlags). +func (f *Factory) PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMapping, withNamespace bool) (kubectl.ResourcePrinter, error) { + printer, ok, err := PrinterForCommand(cmd) + if err != nil { + return nil, err + } + if ok { + clientConfig, err := f.ClientConfig() + if err != nil { + return nil, err + } + + version, err := OutputVersion(cmd, clientConfig.GroupVersion) + if err != nil { + return nil, err + } + if version.IsEmpty() { + version = mapping.GroupVersionKind.GroupVersion() + } + if version.IsEmpty() { + return nil, fmt.Errorf("you must specify an output-version when using this output format") + } + + printer = kubectl.NewVersionedPrinter(printer, mapping.ObjectConvertor, version, mapping.GroupVersionKind.GroupVersion()) + + } else { + // Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper + columnLabel, err := cmd.Flags().GetStringSlice("label-columns") + if err != nil { + columnLabel = []string{} + } + printer, err = f.Printer(mapping, GetFlagBool(cmd, "no-headers"), withNamespace, GetWideFlag(cmd), GetFlagBool(cmd, "show-all"), GetFlagBool(cmd, "show-labels"), isWatch(cmd), columnLabel) + if err != nil { + return nil, err + } + printer = maybeWrapSortingPrinter(cmd, printer) + } + + return printer, nil +} + +// One stop shopping for a Builder +func (f *Factory) NewBuilder(thirdPartyDiscovery bool) *resource.Builder { + mapper, typer := f.Object(thirdPartyDiscovery) + + return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go new file mode 100644 index 00000000..d7cb1bb9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go @@ -0,0 +1,611 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" + "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/strategicpatch" + + "github.com/evanphx/json-patch" + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const ( + ApplyAnnotationsFlag = "save-config" +) + +type debugError interface { + DebugError() (msg string, args []interface{}) +} + +// AddSourceToErr adds handleResourcePrefix and source string to error message. +// verb is the string like "creating", "deleting" etc. +// souce is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. +func AddSourceToErr(verb string, source string, err error) error { + if source != "" { + if statusError, ok := err.(errors.APIStatus); ok { + status := statusError.Status() + status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message) + return &errors.StatusError{ErrStatus: status} + } + return fmt.Errorf("error when %s %q: %v", verb, source, err) + } + return err +} + +var fatalErrHandler = fatal + +// BehaviorOnFatal allows you to override the default behavior when a fatal +// error occurs, which is call os.Exit(1). You can pass 'panic' as a function +// here if you prefer the panic() over os.Exit(1). +func BehaviorOnFatal(f func(string)) { + fatalErrHandler = f +} + +// DefaultBehaviorOnFatal allows you to undo any previous override. Useful in +// tests. +func DefaultBehaviorOnFatal() { + fatalErrHandler = fatal +} + +// fatal prints the message and then exits. If V(2) or greater, glog.Fatal +// is invoked for extended information. +func fatal(msg string) { + // add newline if needed + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + if glog.V(2) { + glog.FatalDepth(2, msg) + } + fmt.Fprint(os.Stderr, msg) + os.Exit(1) +} + +// CheckErr prints a user friendly error to STDERR and exits with a non-zero +// exit code. Unrecognized errors will be printed with an "error: " prefix. +// +// This method is generic to the command in use and may be used by non-Kubectl +// commands. 
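+// A typical caller simply wraps its Run logic, letting CheckErr decide whether
+// to exit. For example (an illustrative sketch; f, out and RunGet are assumed
+// helpers, not part of this file):
+//
+//	cmd := &cobra.Command{
+//		Run: func(cmd *cobra.Command, args []string) {
+//			CheckErr(RunGet(f, out, cmd, args))
+//		},
+//	}
+//
+// Tests that must not call os.Exit can first install their own handler with
+// BehaviorOnFatal(func(msg string) { panic(msg) }) and restore the default
+// afterwards via DefaultBehaviorOnFatal.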
+func CheckErr(err error) { + checkErr(err, fatalErrHandler) +} + +func checkErr(err error, handleErr func(string)) { + if err == nil { + return + } + + if errors.IsInvalid(err) { + details := err.(*errors.StatusError).Status().Details + prefix := fmt.Sprintf("The %s %q is invalid.\n", details.Kind, details.Name) + errs := statusCausesToAggrError(details.Causes) + handleErr(MultilineError(prefix, errs)) + } + + if noMatch, ok := err.(*meta.NoResourceMatchError); ok { + switch { + case len(noMatch.PartialResource.Group) > 0 && len(noMatch.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Group, noMatch.PartialResource.Version)) + case len(noMatch.PartialResource.Group) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Group)) + case len(noMatch.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Version)) + default: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q", noMatch.PartialResource.Resource)) + } + return + } + + // handle multiline errors + if clientcmd.IsConfigurationInvalid(err) { + handleErr(MultilineError("Error in configuration: ", err)) + } + if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) > 0 { + handleErr(MultipleErrors("", agg.Errors())) + } + + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + if !strings.HasPrefix(msg, "error: ") { + msg = fmt.Sprintf("error: %s", msg) + } + } + handleErr(msg) +} + +func statusCausesToAggrError(scs []unversioned.StatusCause) utilerrors.Aggregate { + errs := make([]error, len(scs)) + for i, sc := range scs { + errs[i] = fmt.Errorf("%s: %s", sc.Field, sc.Message) + } + return utilerrors.NewAggregate(errs) +} + +// StandardErrorMessage translates common errors into a human readable message, or returns +// false if the error is not one of the recognized types. It may also log extended +// information to glog. +// +// This method is generic to the command in use and may be used by non-Kubectl +// commands. +func StandardErrorMessage(err error) (string, bool) { + if debugErr, ok := err.(debugError); ok { + glog.V(4).Infof(debugErr.DebugError()) + } + status, isStatus := err.(errors.APIStatus) + switch { + case isStatus: + switch s := status.Status(); { + case s.Reason == "Unauthorized": + return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true + default: + return fmt.Sprintf("Error from server: %s", err.Error()), true + } + case errors.IsUnexpectedObjectError(err): + return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true + } + switch t := err.(type) { + case *url.Error: + glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) + switch { + case strings.Contains(t.Err.Error(), "connection refused"): + host := t.URL + if server, err := url.Parse(t.URL); err == nil { + host = server.Host + } + return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true + } + return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true + } + return "", false +} + +// MultilineError returns a string representing an error that splits sub errors into their own +// lines. The returned string will end with a newline. 
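+// For example (illustrative; the error values are hypothetical):
+//
+//	agg := utilerrors.NewAggregate([]error{
+//		fmt.Errorf("spec.containers[0].image: Required value"),
+//		fmt.Errorf("metadata.labels: Invalid value"),
+//	})
+//	fmt.Print(MultilineError("The Pod \"web\" is invalid.\n", agg))
+//
+// prints the prefix on its own line followed by one "* <error>" line per
+// flattened sub-error; a single or non-aggregate error is printed inline
+// after the prefix.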
+func MultilineError(prefix string, err error) string { + if agg, ok := err.(utilerrors.Aggregate); ok { + errs := utilerrors.Flatten(agg).Errors() + buf := &bytes.Buffer{} + switch len(errs) { + case 0: + return fmt.Sprintf("%s%v\n", prefix, err) + case 1: + return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0])) + default: + fmt.Fprintln(buf, prefix) + for _, err := range errs { + fmt.Fprintf(buf, "* %v\n", messageForError(err)) + } + return buf.String() + } + } + return fmt.Sprintf("%s%s\n", prefix, err) +} + +// MultipleErrors returns a newline delimited string containing +// the prefix and referenced errors in standard form. +func MultipleErrors(prefix string, errs []error) string { + buf := &bytes.Buffer{} + for _, err := range errs { + fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err)) + } + return buf.String() +} + +// messageForError returns the string representing the error. +func messageForError(err error) string { + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + } + return msg +} + +func UsageError(cmd *cobra.Command, format string, args ...interface{}) error { + msg := fmt.Sprintf(format, args...) + return fmt.Errorf("%s\nSee '%s -h' for help and examples.", msg, cmd.CommandPath()) +} + +// Whether this cmd need watching objects. +func isWatch(cmd *cobra.Command) bool { + if w, err := cmd.Flags().GetBool("watch"); w && err == nil { + return true + } + + if wo, err := cmd.Flags().GetBool("watch-only"); wo && err == nil { + return true + } + + return false +} + +func getFlag(cmd *cobra.Command, flag string) *pflag.Flag { + f := cmd.Flags().Lookup(flag) + if f == nil { + glog.Fatalf("flag accessed but not defined for command %s: %s", cmd.Name(), flag) + } + return f +} + +func GetFlagString(cmd *cobra.Command, flag string) string { + s, err := cmd.Flags().GetString(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return s +} + +// GetFlagStringList can be used to accept multiple argument with flag repetition (e.g. -f arg1 -f arg2 ...) +func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { + s, err := cmd.Flags().GetStringSlice(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return s +} + +// GetWideFlag is used to determine if "-o wide" is used +func GetWideFlag(cmd *cobra.Command) bool { + f := cmd.Flags().Lookup("output") + if f.Value.String() == "wide" { + return true + } + return false +} + +func GetFlagBool(cmd *cobra.Command, flag string) bool { + b, err := cmd.Flags().GetBool(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return b +} + +// Assumes the flag has a default value. +func GetFlagInt(cmd *cobra.Command, flag string) int { + i, err := cmd.Flags().GetInt(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return i +} + +// Assumes the flag has a default value. 
+func GetFlagInt64(cmd *cobra.Command, flag string) int64 { + i, err := cmd.Flags().GetInt64(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return i +} + +func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { + d, err := cmd.Flags().GetDuration(flag) + if err != nil { + glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return d +} + +func AddValidateFlags(cmd *cobra.Command) { + cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") + cmd.Flags().String("schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) + cmd.MarkFlagFilename("schema-cache-dir") +} + +func AddRecursiveFlag(cmd *cobra.Command, value *bool) { + cmd.Flags().BoolVarP(value, "recursive", "R", *value, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") +} + +// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. +func AddDryRunFlag(cmd *cobra.Command) { + cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") +} + +func AddApplyAnnotationFlags(cmd *cobra.Command) { + cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. This is useful when you want to perform kubectl apply on this object in the future.") +} + +// AddGeneratorFlags adds flags common to resource generation commands +// TODO: need to take a pass at other generator commands to use this set of flags +func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) { + cmd.Flags().String("generator", defaultGenerator, "The name of the API generator to use.") + AddDryRunFlag(cmd) +} + +func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + if len(data) == 0 { + return nil, fmt.Errorf(`Read from %s but no data found`, source) + } + + return data, nil +} + +// Merge requires JSON serialization +// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval +func Merge(codec runtime.Codec, dst runtime.Object, fragment, kind string) (runtime.Object, error) { + // encode dst into versioned json and apply fragment directly too it + target, err := runtime.Encode(codec, dst) + if err != nil { + return nil, err + } + patched, err := jsonpatch.MergePatch(target, []byte(fragment)) + if err != nil { + return nil, err + } + out, err := runtime.Decode(codec, patched) + if err != nil { + return nil, err + } + return out, nil +} + +// DumpReaderToFile writes all data from the given io.Reader to the specified file +// (usually for temporary use). 
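+// For example (illustrative; the path and data are hypothetical):
+//
+//	if err := DumpReaderToFile(strings.NewReader(patchData), "/tmp/edit.yaml"); err != nil {
+//		return err
+//	}
+//
+// The file is created (or truncated) with mode 0600, so it is suitable for
+// data that should not be world-readable.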
+func DumpReaderToFile(reader io.Reader, filename string) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + if err != nil { + return err + } + buffer := make([]byte, 1024) + for { + count, err := reader.Read(buffer) + if err == io.EOF { + break + } + if err != nil { + return err + } + _, err = f.Write(buffer[:count]) + if err != nil { + return err + } + } + return nil +} + +// UpdateObject updates resource object with updateFn +func UpdateObject(info *resource.Info, codec runtime.Codec, updateFn func(runtime.Object) error) (runtime.Object, error) { + helper := resource.NewHelper(info.Client, info.Mapping) + + if err := updateFn(info.Object); err != nil { + return nil, err + } + + // Update the annotation used by kubectl apply + if err := kubectl.UpdateApplyAnnotation(info, codec); err != nil { + return nil, err + } + + if _, err := helper.Replace(info.Namespace, info.Name, true, info.Object); err != nil { + return nil, err + } + + return info.Object, nil +} + +// AddCmdRecordFlag adds --record flag to command +func AddRecordFlag(cmd *cobra.Command) { + cmd.Flags().Bool("record", false, "Record current kubectl command in the resource annotation.") +} + +func GetRecordFlag(cmd *cobra.Command) bool { + return GetFlagBool(cmd, "record") +} + +func GetDryRunFlag(cmd *cobra.Command) bool { + return GetFlagBool(cmd, "dry-run") +} + +// RecordChangeCause annotate change-cause to input runtime object. +func RecordChangeCause(obj runtime.Object, changeCause string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + annotations := accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[kubectl.ChangeCauseAnnotation] = changeCause + accessor.SetAnnotations(annotations) + return nil +} + +// ChangeResourcePatch creates a strategic merge patch between the origin input resource info +// and the annotated with change-cause input resource info. +func ChangeResourcePatch(info *resource.Info, changeCause string) ([]byte, error) { + oldData, err := json.Marshal(info.Object) + if err != nil { + return nil, err + } + if err := RecordChangeCause(info.Object, changeCause); err != nil { + return nil, err + } + newData, err := json.Marshal(info.Object) + if err != nil { + return nil, err + } + return strategicpatch.CreateTwoWayMergePatch(oldData, newData, info.Object) +} + +// containsChangeCause checks if input resource info contains change-cause annotation. +func ContainsChangeCause(info *resource.Info) bool { + annotations, err := info.Mapping.MetadataAccessor.Annotations(info.Object) + if err != nil { + return false + } + return len(annotations[kubectl.ChangeCauseAnnotation]) > 0 +} + +// ShouldRecord checks if we should record current change cause +func ShouldRecord(cmd *cobra.Command, info *resource.Info) bool { + return GetRecordFlag(cmd) || ContainsChangeCause(info) +} + +// GetThirdPartyGroupVersions returns the thirdparty "group/versions"s and +// resources supported by the server. A user may delete a thirdparty resource +// when this function is running, so this function may return a "NotFound" error +// due to the race. +func GetThirdPartyGroupVersions(discovery discovery.DiscoveryInterface) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) { + result := []unversioned.GroupVersion{} + gvks := []unversioned.GroupVersionKind{} + + groupList, err := discovery.ServerGroups() + if err != nil { + // On forbidden or not found, just return empty lists. 
+ if errors.IsForbidden(err) || errors.IsNotFound(err) { + return result, gvks, nil + } + + return nil, nil, err + } + + for ix := range groupList.Groups { + group := &groupList.Groups[ix] + for jx := range group.Versions { + gv, err2 := unversioned.ParseGroupVersion(group.Versions[jx].GroupVersion) + if err2 != nil { + return nil, nil, err + } + // Skip GroupVersionKinds that have been statically registered. + if registered.IsRegisteredVersion(gv) { + continue + } + result = append(result, gv) + + resourceList, err := discovery.ServerResourcesForGroupVersion(group.Versions[jx].GroupVersion) + if err != nil { + return nil, nil, err + } + for kx := range resourceList.APIResources { + gvks = append(gvks, gv.WithKind(resourceList.APIResources[kx].Kind)) + } + } + } + return result, gvks, nil +} + +func GetIncludeThirdPartyAPIs(cmd *cobra.Command) bool { + if cmd.Flags().Lookup("include-extended-apis") == nil { + return false + } + return GetFlagBool(cmd, "include-extended-apis") +} + +func AddInclude3rdPartyFlags(cmd *cobra.Command) { + cmd.Flags().Bool("include-extended-apis", true, "If true, include definitions of new APIs via calls to the API server. [default true]") +} + +// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args +func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { + foundPair := false + for _, s := range args { + nonResource := strings.Contains(s, "=") || strings.HasSuffix(s, "-") + switch { + case !foundPair && nonResource: + foundPair = true + fallthrough + case foundPair && nonResource: + pairArgs = append(pairArgs, s) + case !foundPair && !nonResource: + resources = append(resources, s) + case foundPair && !nonResource: + err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) + return + } + } + return +} + +// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args +func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { + newPairs = map[string]string{} + if supportRemove { + removePairs = []string{} + } + var invalidBuf bytes.Buffer + + for _, pairArg := range pairArgs { + if strings.Index(pairArg, "=") != -1 { + parts := strings.SplitN(pairArg, "=", 2) + if len(parts) != 2 || len(parts[1]) == 0 { + if invalidBuf.Len() > 0 { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(fmt.Sprintf(pairArg)) + } else { + newPairs[parts[0]] = parts[1] + } + } else if supportRemove && strings.HasSuffix(pairArg, "-") { + removePairs = append(removePairs, pairArg[:len(pairArg)-1]) + } else { + if invalidBuf.Len() > 0 { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(fmt.Sprintf(pairArg)) + } + } + if invalidBuf.Len() > 0 { + err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) + return + } + + return +} + +// MaybeConvertObject attempts to convert an object to a specific group/version. If the object is +// a third party resource it is simply passed through. 
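+// For example (illustrative; assumes the caller already holds a resource.Info,
+// a target group/version and a runtime.Scheme acting as the converter):
+//
+//	converted, err := MaybeConvertObject(info.Object, *clientConfig.GroupVersion, scheme)
+//	if err != nil {
+//		return err
+//	}
+//
+// Third party objects come back unchanged; everything else is converted with
+// the supplied ObjectConvertor.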
+func MaybeConvertObject(obj runtime.Object, gv unversioned.GroupVersion, converter runtime.ObjectConvertor) (runtime.Object, error) { + switch obj.(type) { + case *extensions.ThirdPartyResourceData: + // conversion is not supported for 3rd party objects + return obj, nil + default: + return converter.ConvertToVersion(obj, gv) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go new file mode 100644 index 00000000..979ce7c3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go @@ -0,0 +1,136 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "io" + "strings" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/kubectl" + + "github.com/spf13/cobra" +) + +// AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path) +func AddPrinterFlags(cmd *cobra.Command) { + cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.3/docs/user-guide/jsonpath.md].") + cmd.Flags().String("output-version", "", "Output the formatted object with the given group version (for ex: 'extensions/v1beta1').") + cmd.Flags().Bool("no-headers", false, "When using the default output, don't print headers.") + cmd.Flags().Bool("show-labels", false, "When printing, show all labels as the last column (default hide labels column)") + cmd.Flags().String("template", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") + cmd.MarkFlagFilename("template") + cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. '{.metadata.name}'). The field in the API resource specified by this JSONPath expression must be an integer or a string.") + cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)") +} + +// AddOutputFlagsForMutation adds output related flags to a command. Used by mutations only. +func AddOutputFlagsForMutation(cmd *cobra.Command) { + cmd.Flags().StringP("output", "o", "", "Output mode. 
Use \"-o name\" for shorter output (resource/name).") +} + +// PrintSuccess prints message after finishing mutating operations +func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, operation string) { + resource, _ = mapper.ResourceSingularizer(resource) + if shortOutput { + // -o name: prints resource/name + if len(resource) > 0 { + fmt.Fprintf(out, "%s/%s\n", resource, name) + } else { + fmt.Fprintf(out, "%s\n", name) + } + } else { + // understandable output by default + if len(resource) > 0 { + fmt.Fprintf(out, "%s \"%s\" %s\n", resource, name, operation) + } else { + fmt.Fprintf(out, "\"%s\" %s\n", name, operation) + } + } +} + +// ValidateOutputArgs validates -o flag args for mutations +func ValidateOutputArgs(cmd *cobra.Command) error { + outputMode := GetFlagString(cmd, "output") + if outputMode != "" && outputMode != "name" { + return UsageError(cmd, "Unexpected -o output mode: %v. We only support '-o name'.", outputMode) + } + return nil +} + +// OutputVersion returns the preferred output version for generic content (JSON, YAML, or templates) +// defaultVersion is never mutated. Nil simply allows clean passing in common usage from client.Config +func OutputVersion(cmd *cobra.Command, defaultVersion *unversioned.GroupVersion) (unversioned.GroupVersion, error) { + outputVersionString := GetFlagString(cmd, "output-version") + if len(outputVersionString) == 0 { + if defaultVersion == nil { + return unversioned.GroupVersion{}, nil + } + + return *defaultVersion, nil + } + + return unversioned.ParseGroupVersion(outputVersionString) +} + +// PrinterForCommand returns the default printer for this command. +// Requires that printer flags have been added to cmd (see AddPrinterFlags). +func PrinterForCommand(cmd *cobra.Command) (kubectl.ResourcePrinter, bool, error) { + outputFormat := GetFlagString(cmd, "output") + + // templates are logically optional for specifying a format. + // TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString + templateFile, _ := cmd.Flags().GetString("template") + if len(outputFormat) == 0 && len(templateFile) != 0 { + outputFormat = "template" + } + + templateFormat := []string{ + "go-template=", "go-template-file=", "jsonpath=", "jsonpath-file=", "custom-columns=", "custom-columns-file=", + } + for _, format := range templateFormat { + if strings.HasPrefix(outputFormat, format) { + templateFile = outputFormat[len(format):] + outputFormat = format[:len(format)-1] + } + } + + printer, generic, err := kubectl.GetPrinter(outputFormat, templateFile) + if err != nil { + return nil, generic, err + } + + return maybeWrapSortingPrinter(cmd, printer), generic, nil +} + +func maybeWrapSortingPrinter(cmd *cobra.Command, printer kubectl.ResourcePrinter) kubectl.ResourcePrinter { + sorting, err := cmd.Flags().GetString("sort-by") + if err != nil { + // error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort + return printer + } + + if len(sorting) != 0 { + return &kubectl.SortingPrinter{ + Delegate: printer, + SortField: fmt.Sprintf("{%s}", sorting), + } + } + return printer +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go new file mode 100644 index 00000000..04ed4aa6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go @@ -0,0 +1,212 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" +) + +// ConfigMapGeneratorV1 supports stable generation of a configMap. +type ConfigMapGeneratorV1 struct { + // Name of configMap (required) + Name string + // Type of configMap (optional) + Type string + // FileSources to derive the configMap from (optional) + FileSources []string + // LiteralSources to derive the configMap from (optional) + LiteralSources []string +} + +// Ensure it supports the generator pattern that uses parameter injection. +var _ Generator = &ConfigMapGeneratorV1{} + +// Ensure it supports the generator pattern that uses parameters specified during construction. +var _ StructuredGenerator = &ConfigMapGeneratorV1{} + +// Generate returns a configMap using the specified parameters. +func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + err := ValidateParams(s.ParamNames(), genericParams) + if err != nil { + return nil, err + } + delegate := &ConfigMapGeneratorV1{} + fromFileStrings, found := genericParams["from-file"] + if found { + fromFileArray, isArray := fromFileStrings.([]string) + if !isArray { + return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) + } + delegate.FileSources = fromFileArray + delete(genericParams, "from-file") + } + fromLiteralStrings, found := genericParams["from-literal"] + if found { + fromLiteralArray, isArray := fromLiteralStrings.([]string) + if !isArray { + return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) + } + delegate.LiteralSources = fromLiteralArray + delete(genericParams, "from-literal") + } + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + delegate.Name = params["name"] + delegate.Type = params["type"] + return delegate.StructuredGenerate() +} + +// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. +func (s ConfigMapGeneratorV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + {"type", false}, + {"from-file", false}, + {"from-literal", false}, + {"force", false}, + } +} + +// StructuredGenerate outputs a configMap object using the configured fields. 
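+// For example (illustrative; the name, literal and file values are hypothetical):
+//
+//	generator := ConfigMapGeneratorV1{
+//		Name:           "app-config",
+//		LiteralSources: []string{"log-level=debug"},
+//		FileSources:    []string{"config.properties"},
+//	}
+//	obj, err := generator.StructuredGenerate()
+//
+// On success obj is an *api.ConfigMap whose Data map holds one entry per
+// literal source and per file (or per regular file in a directory source).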
+func (s ConfigMapGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := s.validate(); err != nil { + return nil, err + } + configMap := &api.ConfigMap{} + configMap.Name = s.Name + configMap.Data = map[string]string{} + if len(s.FileSources) > 0 { + if err := handleConfigMapFromFileSources(configMap, s.FileSources); err != nil { + return nil, err + } + } + if len(s.LiteralSources) > 0 { + if err := handleConfigMapFromLiteralSources(configMap, s.LiteralSources); err != nil { + return nil, err + } + } + return configMap, nil +} + +// validate validates required fields are set to support structured generation. +func (s ConfigMapGeneratorV1) validate() error { + if len(s.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} + +// handleConfigMapFromLiteralSources adds the specified literal source +// information into the provided configMap. +func handleConfigMapFromLiteralSources(configMap *api.ConfigMap, literalSources []string) error { + for _, literalSource := range literalSources { + keyName, value, err := parseLiteralSource(literalSource) + if err != nil { + return err + } + err = addKeyFromLiteralToConfigMap(configMap, keyName, value) + if err != nil { + return err + } + } + return nil +} + +// handleConfigMapFromFileSources adds the specified file source information +// into the provided configMap +func handleConfigMapFromFileSources(configMap *api.ConfigMap, fileSources []string) error { + for _, fileSource := range fileSources { + keyName, filePath, err := parseFileSource(fileSource) + if err != nil { + return err + } + info, err := os.Stat(filePath) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", filePath, err.Err) + default: + return fmt.Errorf("error reading %s: %v", filePath, err) + } + } + if info.IsDir() { + if strings.Contains(fileSource, "=") { + return fmt.Errorf("cannot give a key name for a directory path.") + } + fileList, err := ioutil.ReadDir(filePath) + if err != nil { + return fmt.Errorf("error listing files in %s: %v", filePath, err) + } + for _, item := range fileList { + itemPath := path.Join(filePath, item.Name()) + if item.Mode().IsRegular() { + keyName = item.Name() + err = addKeyFromFileToConfigMap(configMap, keyName, itemPath) + if err != nil { + return err + } + } + } + } else { + err = addKeyFromFileToConfigMap(configMap, keyName, filePath) + if err != nil { + return err + } + } + } + + return nil +} + +// addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating +// the value with the content of the given file path, or returns an error. +func addKeyFromFileToConfigMap(configMap *api.ConfigMap, keyName, filePath string) error { + data, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + return addKeyFromLiteralToConfigMap(configMap, keyName, string(data)) +} + +// addKeyFromLiteralToConfigMap adds the given key and data to the given config map, +// returning an error if the key is not valid or if the key already exists. +func addKeyFromLiteralToConfigMap(configMap *api.ConfigMap, keyName, data string) error { + // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys + // to be consistent; validation.IsSecretKey is used here intentionally. 
+ if !validation.IsSecretKey(keyName) { + return fmt.Errorf("%v is not a valid key name for a configMap", keyName) + } + if _, entryExists := configMap.Data[keyName]; entryExists { + return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, configMap.Data) + } + configMap.Data[keyName] = data + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go new file mode 100644 index 00000000..255ad1de --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go @@ -0,0 +1,224 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "bufio" + "bytes" + "fmt" + "io" + "reflect" + "regexp" + "strings" + "text/tabwriter" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/jsonpath" +) + +const ( + columnwidth = 10 + tabwidth = 4 + padding = 3 + padding_character = ' ' + flags = 0 +) + +var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") + +// MassageJSONPath attempts to be flexible with JSONPath expressions, it accepts: +// * metadata.name (no leading '.' or curly brances '{...}' +// * {metadata.name} (no leading '.') +// * .metadata.name (no curly braces '{...}') +// * {.metadata.name} (complete expression) +// And transforms them all into a valid jsonpat expression: +// {.metadata.name} +func massageJSONPath(pathExpression string) (string, error) { + if len(pathExpression) == 0 { + return pathExpression, nil + } + submatches := jsonRegexp.FindStringSubmatch(pathExpression) + if submatches == nil { + return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") + } + if len(submatches) != 3 { + return "", fmt.Errorf("unexpected submatch list: %v", submatches) + } + var fieldSpec string + if len(submatches[1]) != 0 { + fieldSpec = submatches[1] + } else { + fieldSpec = submatches[2] + } + return fmt.Sprintf("{.%s}", fieldSpec), nil +} + +// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of
: pairs. +// e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: +// +// NAME API_VERSION +// foo bar +func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("custom-columns format specified but no custom columns given") + } + parts := strings.Split(spec, ",") + columns := make([]Column, len(parts)) + for ix := range parts { + colSpec := strings.Split(parts[ix], ":") + if len(colSpec) != 2 { + return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected
:", parts[ix]) + } + spec, err := massageJSONPath(colSpec[1]) + if err != nil { + return nil, err + } + columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} + } + return &CustomColumnsPrinter{Columns: columns, Decoder: decoder}, nil +} + +func splitOnWhitespace(line string) []string { + lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) + lineScanner.Split(bufio.ScanWords) + result := []string{} + for lineScanner.Scan() { + result = append(result, lineScanner.Text()) + } + return result +} + +// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected +// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec +// For example the template below: +// NAME API_VERSION +// {metadata.name} {apiVersion} +func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { + scanner := bufio.NewScanner(templateReader) + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + headers := splitOnWhitespace(scanner.Text()) + + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + specs := splitOnWhitespace(scanner.Text()) + + if len(headers) != len(specs) { + return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) + } + + columns := make([]Column, len(headers)) + for ix := range headers { + spec, err := massageJSONPath(specs[ix]) + if err != nil { + return nil, err + } + columns[ix] = Column{ + Header: headers[ix], + FieldSpec: spec, + } + } + return &CustomColumnsPrinter{Columns: columns, Decoder: decoder}, nil +} + +// Column represents a user specified column +type Column struct { + // The header to print above the column, general style is ALL_CAPS + Header string + // The pointer to the field in the object to print in JSONPath form + // e.g. {.ObjectMeta.Name}, see pkg/util/jsonpath for more details. 
+ FieldSpec string +} + +// CustomColumnPrinter is a printer that knows how to print arbitrary columns +// of data from templates specified in the `Columns` array +type CustomColumnsPrinter struct { + Columns []Column + Decoder runtime.Decoder +} + +func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error { + w := tabwriter.NewWriter(out, columnwidth, tabwidth, padding, padding_character, flags) + headers := make([]string, len(s.Columns)) + for ix := range s.Columns { + headers[ix] = s.Columns[ix].Header + } + fmt.Fprintln(w, strings.Join(headers, "\t")) + parsers := make([]*jsonpath.JSONPath, len(s.Columns)) + for ix := range s.Columns { + parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)) + if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { + return err + } + } + + if meta.IsListType(obj) { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + for ix := range objs { + if err := s.printOneObject(objs[ix], parsers, w); err != nil { + return err + } + } + } else { + if err := s.printOneObject(obj, parsers, w); err != nil { + return err + } + } + return w.Flush() +} + +func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jsonpath.JSONPath, out io.Writer) error { + columns := make([]string, len(parsers)) + switch u := obj.(type) { + case *runtime.Unknown: + if len(u.Raw) > 0 { + var err error + if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { + return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) + } + } + } + for ix := range parsers { + parser := parsers[ix] + values, err := parser.FindResults(reflect.ValueOf(obj).Elem().Interface()) + if err != nil { + return err + } + if len(values) == 0 || len(values[0]) == 0 { + fmt.Fprintf(out, "\t") + } + valueStrings := []string{} + for arrIx := range values { + for valIx := range values[arrIx] { + valueStrings = append(valueStrings, fmt.Sprintf("%v", values[arrIx][valIx].Interface())) + } + } + columns[ix] = strings.Join(valueStrings, ",") + } + fmt.Fprintln(out, strings.Join(columns, "\t")) + return nil +} + +func (s *CustomColumnsPrinter) HandledResources() []string { + return []string{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go new file mode 100644 index 00000000..3adb5e74 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go @@ -0,0 +1,2487 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/federation/apis/federation" + fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + client "k8s.io/kubernetes/pkg/client/unversioned" + adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" + "k8s.io/kubernetes/pkg/fieldpath" + "k8s.io/kubernetes/pkg/fields" + qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/types" + deploymentutil "k8s.io/kubernetes/pkg/util/deployment" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/sets" +) + +// Describer generates output for the named resource or an error +// if the output could not be generated. Implementers typically +// abstract the retrieval of the named object from a remote server. +type Describer interface { + Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) +} + +// DescriberSettings holds display configuration for each object +// describer to control what is printed. +type DescriberSettings struct { + ShowEvents bool +} + +// ObjectDescriber is an interface for displaying arbitrary objects with extra +// information. Use when an object is in hand (on disk, or already retrieved). +// Implementers may ignore the additional information passed on extra, or use it +// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer +// is found. +type ObjectDescriber interface { + DescribeObject(object interface{}, extra ...interface{}) (output string, err error) +} + +// ErrNoDescriber is a structured error indicating the provided object or objects +// cannot be described. +type ErrNoDescriber struct { + Types []string +} + +// Error implements the error interface. 
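+// For example (illustrative), ErrNoDescriber{Types: []string{"*api.Pod"}}.Error()
+// yields "no describer has been defined for [*api.Pod]".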
+func (e ErrNoDescriber) Error() string { + return fmt.Sprintf("no describer has been defined for %v", e.Types) +} + +func describerMap(c *client.Client) map[unversioned.GroupKind]Describer { + m := map[unversioned.GroupKind]Describer{ + api.Kind("Pod"): &PodDescriber{c}, + api.Kind("ReplicationController"): &ReplicationControllerDescriber{c}, + api.Kind("Secret"): &SecretDescriber{c}, + api.Kind("Service"): &ServiceDescriber{c}, + api.Kind("ServiceAccount"): &ServiceAccountDescriber{c}, + api.Kind("Node"): &NodeDescriber{c}, + api.Kind("LimitRange"): &LimitRangeDescriber{c}, + api.Kind("ResourceQuota"): &ResourceQuotaDescriber{c}, + api.Kind("PersistentVolume"): &PersistentVolumeDescriber{c}, + api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c}, + api.Kind("Namespace"): &NamespaceDescriber{c}, + api.Kind("Endpoints"): &EndpointsDescriber{c}, + api.Kind("ConfigMap"): &ConfigMapDescriber{c}, + + api.Kind("SecurityContextConstraints"): &SecurityContextConstraintsDescriber{c}, + + extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, + extensions.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, + extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, + autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, + extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, + extensions.Kind("Deployment"): &DeploymentDescriber{adapter.FromUnversionedClient(c)}, + extensions.Kind("Job"): &JobDescriber{c}, + batch.Kind("Job"): &JobDescriber{c}, + apps.Kind("PetSet"): &PetSetDescriber{c}, + extensions.Kind("Ingress"): &IngressDescriber{c}, + } + + return m +} + +// List of all resource types we can describe +func DescribableResources() []string { + keys := make([]string, 0) + + for k := range describerMap(nil) { + resource := strings.ToLower(k.Kind) + keys = append(keys, resource) + } + return keys +} + +// Describer returns the default describe functions for each of the standard +// Kubernetes types. +func DescriberFor(kind unversioned.GroupKind, c *client.Client) (Describer, bool) { + f, ok := describerMap(c)[kind] + return f, ok +} + +// DefaultObjectDescriber can describe the default Kubernetes objects. 
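+// For example (illustrative; pod is assumed to be an object already in hand):
+//
+//	out, err := DefaultObjectDescriber.DescribeObject(pod)
+//
+// ErrNoDescriber is returned when none of the registered describe functions
+// accepts the object's type.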
+var DefaultObjectDescriber ObjectDescriber + +func init() { + d := &Describers{} + err := d.Add( + describeLimitRange, + describeQuota, + describePod, + describeService, + describeReplicationController, + describeDaemonSet, + describeNode, + describeNamespace, + ) + if err != nil { + glog.Fatalf("Cannot register describers: %v", err) + } + DefaultObjectDescriber = d +} + +// NamespaceDescriber generates information about a namespace +type NamespaceDescriber struct { + client.Interface +} + +func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ns, err := d.Namespaces().Get(name) + if err != nil { + return "", err + } + resourceQuotaList, err := d.ResourceQuotas(name).List(api.ListOptions{}) + if err != nil { + return "", err + } + limitRangeList, err := d.LimitRanges(name).List(api.ListOptions{}) + if err != nil { + return "", err + } + + return describeNamespace(ns, resourceQuotaList, limitRangeList) +} + +func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", namespace.Name) + printLabelsMultiline(out, "Labels", namespace.Labels) + fmt.Fprintf(out, "Status:\t%s\n", string(namespace.Status.Phase)) + if resourceQuotaList != nil { + fmt.Fprintf(out, "\n") + DescribeResourceQuotas(resourceQuotaList, out) + } + if limitRangeList != nil { + fmt.Fprintf(out, "\n") + DescribeLimitRanges(limitRangeList, out) + } + return nil + }) +} + +// DescribeLimitRanges merges a set of limit range items into a single tabular description +func DescribeLimitRanges(limitRanges *api.LimitRangeList, w io.Writer) { + if len(limitRanges.Items) == 0 { + fmt.Fprint(w, "No resource limits.\n") + return + } + fmt.Fprintf(w, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") + fmt.Fprintf(w, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") + for _, limitRange := range limitRanges.Items { + for i := range limitRange.Spec.Limits { + item := limitRange.Spec.Limits[i] + maxResources := item.Max + minResources := item.Min + defaultLimitResources := item.Default + defaultRequestResources := item.DefaultRequest + ratio := item.MaxLimitRequestRatio + + set := map[api.ResourceName]bool{} + for k := range maxResources { + set[k] = true + } + for k := range minResources { + set[k] = true + } + for k := range defaultLimitResources { + set[k] = true + } + for k := range defaultRequestResources { + set[k] = true + } + for k := range ratio { + set[k] = true + } + + for k := range set { + // if no value is set, we output - + maxValue := "-" + minValue := "-" + defaultLimitValue := "-" + defaultRequestValue := "-" + ratioValue := "-" + + maxQuantity, maxQuantityFound := maxResources[k] + if maxQuantityFound { + maxValue = maxQuantity.String() + } + + minQuantity, minQuantityFound := minResources[k] + if minQuantityFound { + minValue = minQuantity.String() + } + + defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] + if defaultLimitQuantityFound { + defaultLimitValue = defaultLimitQuantity.String() + } + + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] + if defaultRequestQuantityFound { + defaultRequestValue = defaultRequestQuantity.String() + } + + ratioQuantity, ratioQuantityFound := ratio[k] + if ratioQuantityFound { + ratioValue = 
ratioQuantity.String() + } + + msg := " %s\t%v\t%v\t%v\t%v\t%v\t%v\n" + fmt.Fprintf(w, msg, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) + } + } + } +} + +// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas +func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) { + if len(quotas.Items) == 0 { + fmt.Fprint(w, "No resource quota.\n") + return + } + sort.Sort(SortableResourceQuotas(quotas.Items)) + + fmt.Fprint(w, "Resource Quotas") + for _, q := range quotas.Items { + fmt.Fprintf(w, "\n Name:\t%s\n", q.Name) + if len(q.Spec.Scopes) > 0 { + scopes := make([]string, 0, len(q.Spec.Scopes)) + for _, scope := range q.Spec.Scopes { + scopes = append(scopes, string(scope)) + } + sort.Strings(scopes) + fmt.Fprintf(w, " Scopes:\t%s\n", strings.Join(scopes, ", ")) + for _, scope := range scopes { + helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + if len(helpText) > 0 { + fmt.Fprintf(w, " * %s\n", helpText) + } + } + } + + fmt.Fprintf(w, " Resource\tUsed\tHard\n") + fmt.Fprint(w, " --------\t---\t---\n") + + resources := make([]api.ResourceName, 0, len(q.Status.Hard)) + for resource := range q.Status.Hard { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + + for _, resource := range resources { + hardQuantity := q.Status.Hard[resource] + usedQuantity := q.Status.Used[resource] + fmt.Fprintf(w, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String()) + } + } +} + +// LimitRangeDescriber generates information about a limit range +type LimitRangeDescriber struct { + client.Interface +} + +func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + lr := d.LimitRanges(namespace) + + limitRange, err := lr.Get(name) + if err != nil { + return "", err + } + return describeLimitRange(limitRange) +} + +func describeLimitRange(limitRange *api.LimitRange) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", limitRange.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", limitRange.Namespace) + fmt.Fprintf(out, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") + fmt.Fprintf(out, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") + for i := range limitRange.Spec.Limits { + item := limitRange.Spec.Limits[i] + maxResources := item.Max + minResources := item.Min + defaultLimitResources := item.Default + defaultRequestResources := item.DefaultRequest + ratio := item.MaxLimitRequestRatio + + set := map[api.ResourceName]bool{} + for k := range maxResources { + set[k] = true + } + for k := range minResources { + set[k] = true + } + for k := range defaultLimitResources { + set[k] = true + } + for k := range defaultRequestResources { + set[k] = true + } + for k := range ratio { + set[k] = true + } + + for k := range set { + // if no value is set, we output - + maxValue := "-" + minValue := "-" + defaultLimitValue := "-" + defaultRequestValue := "-" + ratioValue := "-" + + maxQuantity, maxQuantityFound := maxResources[k] + if maxQuantityFound { + maxValue = maxQuantity.String() + } + + minQuantity, minQuantityFound := minResources[k] + if minQuantityFound { + minValue = minQuantity.String() + } + + defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] + if defaultLimitQuantityFound { + defaultLimitValue = 
defaultLimitQuantity.String() + } + + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] + if defaultRequestQuantityFound { + defaultRequestValue = defaultRequestQuantity.String() + } + + ratioQuantity, ratioQuantityFound := ratio[k] + if ratioQuantityFound { + ratioValue = ratioQuantity.String() + } + + msg := "%v\t%v\t%v\t%v\t%v\t%v\t%v\n" + fmt.Fprintf(out, msg, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) + } + } + return nil + }) +} + +// ResourceQuotaDescriber generates information about a resource quota +type ResourceQuotaDescriber struct { + client.Interface +} + +func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rq := d.ResourceQuotas(namespace) + + resourceQuota, err := rq.Get(name) + if err != nil { + return "", err + } + + return describeQuota(resourceQuota) +} + +func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string { + switch scope { + case api.ResourceQuotaScopeTerminating: + return "Matches all pods that have an active deadline." + case api.ResourceQuotaScopeNotTerminating: + return "Matches all pods that do not have an active deadline." + case api.ResourceQuotaScopeBestEffort: + return "Matches all pods that have best effort quality of service." + case api.ResourceQuotaScopeNotBestEffort: + return "Matches all pods that do not have best effort quality of service." + default: + return "" + } +} +func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", resourceQuota.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", resourceQuota.Namespace) + if len(resourceQuota.Spec.Scopes) > 0 { + scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) + for _, scope := range resourceQuota.Spec.Scopes { + scopes = append(scopes, string(scope)) + } + sort.Strings(scopes) + fmt.Fprintf(out, "Scopes:\t%s\n", strings.Join(scopes, ", ")) + for _, scope := range scopes { + helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) + if len(helpText) > 0 { + fmt.Fprintf(out, " * %s\n", helpText) + } + } + } + fmt.Fprintf(out, "Resource\tUsed\tHard\n") + fmt.Fprintf(out, "--------\t----\t----\n") + + resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) + for resource := range resourceQuota.Status.Hard { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + + msg := "%v\t%v\t%v\n" + for i := range resources { + resource := resources[i] + hardQuantity := resourceQuota.Status.Hard[resource] + usedQuantity := resourceQuota.Status.Used[resource] + fmt.Fprintf(out, msg, resource, usedQuantity.String(), hardQuantity.String()) + } + return nil + }) +} + +// SecurityContextConstraintsDescriber generates information about an SCC +type SecurityContextConstraintsDescriber struct { + client.Interface +} + +func (d *SecurityContextConstraintsDescriber) Describe(namespace, name string, s DescriberSettings) (string, error) { + scc, err := d.SecurityContextConstraints().Get(name) + if err != nil { + return "", err + } + return describeSecurityContextConstraints(scc) +} + +func describeSecurityContextConstraints(scc *api.SecurityContextConstraints) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", scc.Name) + + priority := "" + if scc.Priority != nil { + priority = fmt.Sprintf("%d", *scc.Priority) + } + fmt.Fprintf(out, 
"Priority:\t%s\n", stringOrNone(priority)) + + fmt.Fprintf(out, "Access:\t\n") + fmt.Fprintf(out, " Users:\t%s\n", stringOrNone(strings.Join(scc.Users, ","))) + fmt.Fprintf(out, " Groups:\t%s\n", stringOrNone(strings.Join(scc.Groups, ","))) + + fmt.Fprintf(out, "Settings:\t\n") + fmt.Fprintf(out, " Allow Privileged:\t%t\n", scc.AllowPrivilegedContainer) + fmt.Fprintf(out, " Default Add Capabilities:\t%s\n", capsToString(scc.DefaultAddCapabilities)) + fmt.Fprintf(out, " Required Drop Capabilities:\t%s\n", capsToString(scc.RequiredDropCapabilities)) + fmt.Fprintf(out, " Allowed Capabilities:\t%s\n", capsToString(scc.AllowedCapabilities)) + fmt.Fprintf(out, " Allowed Volume Types:\t%s\n", fsTypeToString(scc.Volumes)) + fmt.Fprintf(out, " Allow Host Network:\t%t\n", scc.AllowHostNetwork) + fmt.Fprintf(out, " Allow Host Ports:\t%t\n", scc.AllowHostPorts) + fmt.Fprintf(out, " Allow Host PID:\t%t\n", scc.AllowHostPID) + fmt.Fprintf(out, " Allow Host IPC:\t%t\n", scc.AllowHostIPC) + fmt.Fprintf(out, " Read Only Root Filesystem:\t%t\n", scc.ReadOnlyRootFilesystem) + + fmt.Fprintf(out, " Run As User Strategy: %s\t\n", string(scc.RunAsUser.Type)) + uid := "" + if scc.RunAsUser.UID != nil { + uid = strconv.FormatInt(*scc.RunAsUser.UID, 10) + } + fmt.Fprintf(out, " UID:\t%s\n", stringOrNone(uid)) + + uidRangeMin := "" + if scc.RunAsUser.UIDRangeMin != nil { + uidRangeMin = strconv.FormatInt(*scc.RunAsUser.UIDRangeMin, 10) + } + fmt.Fprintf(out, " UID Range Min:\t%s\n", stringOrNone(uidRangeMin)) + + uidRangeMax := "" + if scc.RunAsUser.UIDRangeMax != nil { + uidRangeMax = strconv.FormatInt(*scc.RunAsUser.UIDRangeMax, 10) + } + fmt.Fprintf(out, " UID Range Max:\t%s\n", stringOrNone(uidRangeMax)) + + fmt.Fprintf(out, " SELinux Context Strategy: %s\t\n", string(scc.SELinuxContext.Type)) + var user, role, seLinuxType, level string + if scc.SELinuxContext.SELinuxOptions != nil { + user = scc.SELinuxContext.SELinuxOptions.User + role = scc.SELinuxContext.SELinuxOptions.Role + seLinuxType = scc.SELinuxContext.SELinuxOptions.Type + level = scc.SELinuxContext.SELinuxOptions.Level + } + fmt.Fprintf(out, " User:\t%s\n", stringOrNone(user)) + fmt.Fprintf(out, " Role:\t%s\n", stringOrNone(role)) + fmt.Fprintf(out, " Type:\t%s\n", stringOrNone(seLinuxType)) + fmt.Fprintf(out, " Level:\t%s\n", stringOrNone(level)) + + fmt.Fprintf(out, " FSGroup Strategy: %s\t\n", string(scc.FSGroup.Type)) + fmt.Fprintf(out, " Ranges:\t%s\n", idRangeToString(scc.FSGroup.Ranges)) + + fmt.Fprintf(out, " Supplemental Groups Strategy: %s\t\n", string(scc.SupplementalGroups.Type)) + fmt.Fprintf(out, " Ranges:\t%s\n", idRangeToString(scc.SupplementalGroups.Ranges)) + + return nil + }) +} + +func stringOrNone(s string) string { + if len(s) > 0 { + return s + } + return "" +} + +func fsTypeToString(volumes []api.FSType) string { + strVolumes := []string{} + for _, v := range volumes { + strVolumes = append(strVolumes, string(v)) + } + return stringOrNone(strings.Join(strVolumes, ",")) +} + +func idRangeToString(ranges []api.IDRange) string { + formattedString := "" + if ranges != nil { + strRanges := []string{} + for _, r := range ranges { + strRanges = append(strRanges, fmt.Sprintf("%d-%d", r.Min, r.Max)) + } + formattedString = strings.Join(strRanges, ",") + } + return stringOrNone(formattedString) +} + +func capsToString(caps []api.Capability) string { + formattedString := "" + if caps != nil { + strCaps := []string{} + for _, c := range caps { + strCaps = append(strCaps, string(c)) + } + formattedString = strings.Join(strCaps, ",") + } + 
return stringOrNone(formattedString) +} + +// PodDescriber generates information about a pod and the replication controllers that +// create it. +type PodDescriber struct { + client.Interface +} + +func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + pod, err := d.Pods(namespace).Get(name) + if err != nil { + if describerSettings.ShowEvents { + eventsInterface := d.Events(namespace) + selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) + options := api.ListOptions{FieldSelector: selector} + events, err2 := eventsInterface.List(options) + if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) + DescribeEvents(events, out) + return nil + }) + } + } + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + if ref, err := api.GetReference(pod); err != nil { + glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + } else { + ref.Kind = "" + events, _ = d.Events(namespace).Search(ref) + } + } + + return describePod(pod, events) +} + +func describePod(pod *api.Pod, events *api.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", pod.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) + if len(pod.Annotations["openshift.io/scc"]) > 0 { + fmt.Fprintf(out, "Security Policy:\t%s\n", pod.Annotations["openshift.io/scc"]) + } + fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) + if pod.Status.StartTime != nil { + fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) + } + printLabelsMultiline(out, "Labels", pod.Labels) + if pod.DeletionTimestamp != nil { + fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) + } else { + fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase)) + } + if len(pod.Status.Reason) > 0 { + fmt.Fprintf(out, "Reason:\t%s\n", pod.Status.Reason) + } + if len(pod.Status.Message) > 0 { + fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message) + } + fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP) + fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations)) + if len(pod.Spec.InitContainers) > 0 { + describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "") + } + describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "") + if len(pod.Status.Conditions) > 0 { + fmt.Fprint(out, "Conditions:\n Type\tStatus\n") + for _, c := range pod.Status.Conditions { + fmt.Fprintf(out, " %v \t%v \n", + c.Type, + c.Status) + } + } + describeVolumes(pod.Spec.Volumes, out, "") + fmt.Fprintf(out, "QoS Tier:\t%s\n", qosutil.GetPodQos(pod)) + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +func printControllers(annotation map[string]string) string { + value, ok := annotation["kubernetes.io/created-by"] + if ok { + var r api.SerializedReference + err := json.Unmarshal([]byte(value), &r) + if err == nil { + return fmt.Sprintf("%s/%s", r.Reference.Kind, r.Reference.Name) + } + } + return "" +} + +// TODO: Do a better job at indenting, maybe by using a prefix writer +func describeVolumes(volumes []api.Volume, out 
io.Writer, space string) { + if volumes == nil || len(volumes) == 0 { + fmt.Fprintf(out, "%sNo volumes.\n", space) + return + } + fmt.Fprintf(out, "%sVolumes:\n", space) + for _, volume := range volumes { + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + fmt.Fprintf(out, " %s%v:\n", nameIndent, volume.Name) + switch { + case volume.VolumeSource.HostPath != nil: + printHostPathVolumeSource(volume.VolumeSource.HostPath, out) + case volume.VolumeSource.EmptyDir != nil: + printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, out) + case volume.VolumeSource.GCEPersistentDisk != nil: + printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, out) + case volume.VolumeSource.AWSElasticBlockStore != nil: + printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, out) + case volume.VolumeSource.GitRepo != nil: + printGitRepoVolumeSource(volume.VolumeSource.GitRepo, out) + case volume.VolumeSource.Secret != nil: + printSecretVolumeSource(volume.VolumeSource.Secret, out) + case volume.VolumeSource.ConfigMap != nil: + printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, out) + case volume.VolumeSource.NFS != nil: + printNFSVolumeSource(volume.VolumeSource.NFS, out) + case volume.VolumeSource.ISCSI != nil: + printISCSIVolumeSource(volume.VolumeSource.ISCSI, out) + case volume.VolumeSource.Glusterfs != nil: + printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, out) + case volume.VolumeSource.PersistentVolumeClaim != nil: + printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, out) + case volume.VolumeSource.RBD != nil: + printRBDVolumeSource(volume.VolumeSource.RBD, out) + case volume.VolumeSource.DownwardAPI != nil: + printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, out) + default: + fmt.Fprintf(out, " \n") + } + } +} + +func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tHostPath (bare host directory volume)\n"+ + " Path:\t%v\n", hostPath.Path) +} + +func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ + " Medium:\t%v\n", emptyDir.Medium) +} + +func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ + " PDName:\t%v\n"+ + " FSType:\t%v\n"+ + " Partition:\t%v\n"+ + " ReadOnly:\t%v\n", + gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) +} + +func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ + " VolumeID:\t%v\n"+ + " FSType:\t%v\n"+ + " Partition:\t%v\n"+ + " ReadOnly:\t%v\n", + aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) +} + +func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ + " Repository:\t%v\n"+ + " Revision:\t%v\n", + git.Repository, git.Revision) +} + +func printSecretVolumeSource(secret *api.SecretVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tSecret (a volume populated by a Secret)\n"+ + " SecretName:\t%v\n", secret.SecretName) +} + +func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tConfigMap (a volume populated 
by a ConfigMap)\n"+ + " Name:\t%v\n", configMap.Name) +} + +func printNFSVolumeSource(nfs *api.NFSVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ + " Server:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + nfs.Server, nfs.Path, nfs.ReadOnly) +} + +func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ + " TargetPortal:\t%v\n"+ + " IQN:\t%v\n"+ + " Lun:\t%v\n"+ + " ISCSIInterface\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly) +} + +func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ + " EndpointsName:\t%v\n"+ + " Path:\t%v\n"+ + " ReadOnly:\t%v\n", + glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) +} + +func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ + " ClaimName:\t%v\n"+ + " ReadOnly:\t%v\n", + claim.ClaimName, claim.ReadOnly) +} + +func printRBDVolumeSource(rbd *api.RBDVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ + " CephMonitors:\t%v\n"+ + " RBDImage:\t%v\n"+ + " FSType:\t%v\n"+ + " RBDPool:\t%v\n"+ + " RadosUser:\t%v\n"+ + " Keyring:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n", + rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) +} + +func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, out io.Writer) { + fmt.Fprintf(out, " Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") + for _, mapping := range d.Items { + if mapping.FieldRef != nil { + fmt.Fprintf(out, " %v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) + } + if mapping.ResourceFieldRef != nil { + fmt.Fprintf(out, " %v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path) + } + } +} + +type PersistentVolumeDescriber struct { + client.Interface +} + +func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.PersistentVolumes() + + pv, err := c.Get(name) + if err != nil { + return "", err + } + + storage := pv.Spec.Capacity[api.ResourceStorage] + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(pv) + } + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", pv.Name) + printLabelsMultiline(out, "Labels", pv.Labels) + fmt.Fprintf(out, "Status:\t%s\n", pv.Status.Phase) + if pv.Spec.ClaimRef != nil { + fmt.Fprintf(out, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) + } else { + fmt.Fprintf(out, "Claim:\t%s\n", "") + } + fmt.Fprintf(out, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) + fmt.Fprintf(out, "Access Modes:\t%s\n", api.GetAccessModesAsString(pv.Spec.AccessModes)) + fmt.Fprintf(out, "Capacity:\t%s\n", storage.String()) + fmt.Fprintf(out, "Message:\t%s\n", pv.Status.Message) + fmt.Fprintf(out, "Source:\n") + + switch { + case pv.Spec.HostPath != nil: + 
printHostPathVolumeSource(pv.Spec.HostPath, out) + case pv.Spec.GCEPersistentDisk != nil: + printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, out) + case pv.Spec.AWSElasticBlockStore != nil: + printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, out) + case pv.Spec.NFS != nil: + printNFSVolumeSource(pv.Spec.NFS, out) + case pv.Spec.ISCSI != nil: + printISCSIVolumeSource(pv.Spec.ISCSI, out) + case pv.Spec.Glusterfs != nil: + printGlusterfsVolumeSource(pv.Spec.Glusterfs, out) + case pv.Spec.RBD != nil: + printRBDVolumeSource(pv.Spec.RBD, out) + } + + if events != nil { + DescribeEvents(events, out) + } + + return nil + }) +} + +type PersistentVolumeClaimDescriber struct { + client.Interface +} + +func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.PersistentVolumeClaims(namespace) + + pvc, err := c.Get(name) + if err != nil { + return "", err + } + + storage := pvc.Spec.Resources.Requests[api.ResourceStorage] + capacity := "" + accessModes := "" + if pvc.Spec.VolumeName != "" { + accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) + storage = pvc.Status.Capacity[api.ResourceStorage] + capacity = storage.String() + } + + events, _ := d.Events(namespace).Search(pvc) + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) + fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase) + fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName) + printLabelsMultiline(out, "Labels", pvc.Labels) + fmt.Fprintf(out, "Capacity:\t%s\n", capacity) + fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes) + if events != nil { + DescribeEvents(events, out) + } + + return nil + }) +} + +// TODO: Do a better job at indenting, maybe by using a prefix writer +func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) { + statuses := map[string]api.ContainerStatus{} + for _, status := range containerStatuses { + statuses[status.Name] = status + } + if len(containers) == 0 { + fmt.Fprintf(out, "%s%s: \n", space, label) + } else { + fmt.Fprintf(out, "%s%s:\n", space, label) + } + for _, container := range containers { + status, ok := statuses[container.Name] + nameIndent := "" + if len(space) > 0 { + nameIndent = " " + } + fmt.Fprintf(out, " %s%v:\n", nameIndent, container.Name) + if ok { + fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID) + } + fmt.Fprintf(out, " Image:\t%s\n", container.Image) + if ok { + fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID) + } + portString := describeContainerPorts(container.Ports) + if strings.Contains(portString, ",") { + fmt.Fprintf(out, " Ports:\t%s\n", portString) + } else { + fmt.Fprintf(out, " Port:\t%s\n", portString) + } + + if len(container.Command) > 0 { + fmt.Fprintf(out, " Command:\n") + for _, c := range container.Command { + fmt.Fprintf(out, " %s\n", c) + } + } + if len(container.Args) > 0 { + fmt.Fprintf(out, " Args:\n") + for _, arg := range container.Args { + fmt.Fprintf(out, " %s\n", arg) + } + } + + resources := container.Resources + if len(resources.Limits) > 0 { + fmt.Fprintf(out, " Limits:\n") + } + for _, name := range SortedResourceNames(resources.Limits) { + quantity := resources.Limits[name] + fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) + } + + if len(resources.Requests) > 0 { + fmt.Fprintf(out, " Requests:\n") + } + 
for _, name := range SortedResourceNames(resources.Requests) { + quantity := resources.Requests[name] + fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) + } + + if ok { + describeStatus("State", status.State, out) + if status.LastTerminationState.Terminated != nil { + describeStatus("Last State", status.LastTerminationState, out) + } + fmt.Fprintf(out, " Ready:\t%v\n", printBool(status.Ready)) + fmt.Fprintf(out, " Restart Count:\t%d\n", status.RestartCount) + } + + if container.LivenessProbe != nil { + probe := DescribeProbe(container.LivenessProbe) + fmt.Fprintf(out, " Liveness:\t%s\n", probe) + } + if container.ReadinessProbe != nil { + probe := DescribeProbe(container.ReadinessProbe) + fmt.Fprintf(out, " Readiness:\t%s\n", probe) + } + none := "" + if len(container.Env) == 0 { + none = "\t" + } + fmt.Fprintf(out, " Environment Variables:%s\n", none) + for _, e := range container.Env { + if e.ValueFrom == nil { + fmt.Fprintf(out, " %s:\t%s\n", e.Name, e.Value) + continue + } + + switch { + case e.ValueFrom.FieldRef != nil: + var valueFrom string + if resolverFn != nil { + valueFrom = resolverFn(e) + } + fmt.Fprintf(out, " %s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath) + case e.ValueFrom.ResourceFieldRef != nil: + valueFrom, err := fieldpath.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container) + if err != nil { + valueFrom = "" + } + fmt.Fprintf(out, " %s:\t%s (%s)\n", e.Name, valueFrom, e.ValueFrom.ResourceFieldRef.Resource) + case e.ValueFrom.SecretKeyRef != nil: + fmt.Fprintf(out, " %s:\t\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name) + case e.ValueFrom.ConfigMapKeyRef != nil: + fmt.Fprintf(out, " %s:\t\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name) + } + } + } +} + +func describeContainerPorts(cPorts []api.ContainerPort) string { + ports := make([]string, 0, len(cPorts)) + for _, cPort := range cPorts { + ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) + } + return strings.Join(ports, ", ") +} + +// DescribeProbe is exported for consumers in other API groups that have probes +func DescribeProbe(probe *api.Probe) string { + attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) + switch { + case probe.Exec != nil: + return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs) + case probe.HTTPGet != nil: + url := &url.URL{} + url.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme)) + if len(probe.HTTPGet.Port.String()) > 0 { + url.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String()) + } else { + url.Host = probe.HTTPGet.Host + } + url.Path = probe.HTTPGet.Path + return fmt.Sprintf("http-get %s %s", url.String(), attrs) + case probe.TCPSocket != nil: + return fmt.Sprintf("tcp-socket :%s %s", probe.TCPSocket.Port.String(), attrs) + } + return fmt.Sprintf("unknown %s", attrs) +} + +type EnvVarResolverFunc func(e api.EnvVar) string + +// EnvValueFrom is exported for use by describers in other packages +func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { + return func(e api.EnvVar) string { + internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "") + if err != nil { + return "" // pod validation should catch this on create + } + + valueFrom, err := 
fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) + if err != nil { + return "" // pod validation should catch this on create + } + + return valueFrom + } +} + +func describeStatus(stateName string, state api.ContainerState, out io.Writer) { + switch { + case state.Running != nil: + fmt.Fprintf(out, " %s:\tRunning\n", stateName) + fmt.Fprintf(out, " Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z)) + case state.Waiting != nil: + fmt.Fprintf(out, " %s:\tWaiting\n", stateName) + if state.Waiting.Reason != "" { + fmt.Fprintf(out, " Reason:\t%s\n", state.Waiting.Reason) + } + case state.Terminated != nil: + fmt.Fprintf(out, " %s:\tTerminated\n", stateName) + if state.Terminated.Reason != "" { + fmt.Fprintf(out, " Reason:\t%s\n", state.Terminated.Reason) + } + if state.Terminated.Message != "" { + fmt.Fprintf(out, " Message:\t%s\n", state.Terminated.Message) + } + fmt.Fprintf(out, " Exit Code:\t%d\n", state.Terminated.ExitCode) + if state.Terminated.Signal > 0 { + fmt.Fprintf(out, " Signal:\t%d\n", state.Terminated.Signal) + } + fmt.Fprintf(out, " Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, " Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) + default: + fmt.Fprintf(out, " %s:\tWaiting\n", stateName) + } +} + +func printBool(value bool) string { + if value { + return "True" + } + + return "False" +} + +// ReplicationControllerDescriber generates information about a replication controller +// and the pods it has created. +type ReplicationControllerDescriber struct { + client.Interface +} + +func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rc := d.ReplicationControllers(namespace) + pc := d.Pods(namespace) + + controller, err := rc.Get(name) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector)) + if err != nil { + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(controller) + } + + return describeReplicationController(controller, events, running, waiting, succeeded, failed) +} + +func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", controller.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", controller.Namespace) + if controller.Spec.Template != nil { + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&controller.Spec.Template.Spec)) + } else { + fmt.Fprintf(out, "Image(s):\t%s\n", "") + } + fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) + printLabelsMultiline(out, "Labels", controller.Labels) + fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + if controller.Spec.Template != nil { + describeVolumes(controller.Spec.Template.Spec.Volumes, out, "") + } + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) { + if template == nil { + fmt.Fprintf(out, " ") + return + } + printLabelsMultiline(out, " Labels", template.Labels) + if 
len(template.Annotations) > 0 { + printLabelsMultiline(out, " Annotations", template.Annotations) + } + if len(template.Spec.ServiceAccountName) > 0 { + fmt.Fprintf(out, " Service Account:\t%s\n", template.Spec.ServiceAccountName) + } + if len(template.Spec.InitContainers) > 0 { + describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ") + } + describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ") + describeVolumes(template.Spec.Volumes, out, " ") +} + +// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. +type ReplicaSetDescriber struct { + client.Interface +} + +func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + rsc := d.Extensions().ReplicaSets(namespace) + pc := d.Pods(namespace) + + rs, err := rsc.Get(name) + if err != nil { + return "", err + } + + selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + if err != nil { + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(rs) + } + + return describeReplicaSet(rs, events, running, waiting, succeeded, failed) +} + +func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", rs.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", rs.Namespace) + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&rs.Spec.Template.Spec)) + fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(rs.Spec.Selector)) + printLabelsMultiline(out, "Labels", rs.Labels) + fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + describeVolumes(rs.Spec.Template.Spec.Volumes, out, "") + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// JobDescriber generates information about a job and the pods it has created. 
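// A minimal in-package sketch (assuming `c` is a configured *client.Client; the
// namespace and job name are made up). The same describer is also reachable via
// DescriberFor(batch.Kind("Job"), c) or DescriberFor(extensions.Kind("Job"), c),
// since it is registered under both groups:
//
//	jd := &JobDescriber{client: c}
//	text, err := jd.Describe("batch-jobs", "nightly-import", DescriberSettings{ShowEvents: false})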
+type JobDescriber struct { + client *client.Client +} + +func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + job, err := d.client.Extensions().Jobs(namespace).Get(name) + if err != nil { + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.client.Events(namespace).Search(job) + } + + return describeJob(job, events) +} + +func describeJob(job *batch.Job, events *api.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", job.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", job.Namespace) + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&job.Spec.Template.Spec)) + selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) + fmt.Fprintf(out, "Selector:\t%s\n", selector) + fmt.Fprintf(out, "Parallelism:\t%d\n", *job.Spec.Parallelism) + if job.Spec.Completions != nil { + fmt.Fprintf(out, "Completions:\t%d\n", *job.Spec.Completions) + } else { + fmt.Fprintf(out, "Completions:\t\n") + } + if job.Status.StartTime != nil { + fmt.Fprintf(out, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z)) + } + if job.Spec.ActiveDeadlineSeconds != nil { + fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) + } + printLabelsMultiline(out, "Labels", job.Labels) + fmt.Fprintf(out, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) + describeVolumes(job.Spec.Template.Spec.Volumes, out, "") + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// DaemonSetDescriber generates information about a daemon set and the pods it has created. +type DaemonSetDescriber struct { + client.Interface +} + +func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + dc := d.Extensions().DaemonSets(namespace) + pc := d.Pods(namespace) + + daemon, err := dc.Get(name) + if err != nil { + return "", err + } + + selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) + if err != nil { + return "", err + } + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + if err != nil { + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(daemon) + } + + return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) +} + +func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", daemon.Name) + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec)) + selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) + if err != nil { + // this shouldn't happen if LabelSelector passed validation + return err + } + fmt.Fprintf(out, "Selector:\t%s\n", selector) + fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) + printLabelsMultiline(out, "Labels", daemon.Labels) + fmt.Fprintf(out, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) + fmt.Fprintf(out, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) + fmt.Fprintf(out, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d 
Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// SecretDescriber generates information about a secret +type SecretDescriber struct { + client.Interface +} + +func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.Secrets(namespace) + + secret, err := c.Get(name) + if err != nil { + return "", err + } + + return describeSecret(secret) +} + +func describeSecret(secret *api.Secret) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", secret.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", secret.Namespace) + printLabelsMultiline(out, "Labels", secret.Labels) + printLabelsMultiline(out, "Annotations", secret.Annotations) + + fmt.Fprintf(out, "\nType:\t%s\n", secret.Type) + + fmt.Fprintf(out, "\nData\n====\n") + for k, v := range secret.Data { + switch { + case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken, + k == api.DockerConfigKey && secret.Type == api.SecretTypeDockercfg: + fmt.Fprintf(out, "%s:\t%s\n", k, string(v)) + default: + fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) + } + } + + return nil + }) +} + +type IngressDescriber struct { + client.Interface +} + +func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := i.Extensions().Ingress(namespace) + ing, err := c.Get(name) + if err != nil { + return "", err + } + return i.describeIngress(ing, describerSettings) +} + +func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { + endpoints, _ := i.Endpoints(ns).Get(backend.ServiceName) + service, _ := i.Services(ns).Get(backend.ServiceName) + spName := "" + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + switch backend.ServicePort.Type { + case intstr.String: + if backend.ServicePort.StrVal == sp.Name { + spName = sp.Name + } + case intstr.Int: + if int32(backend.ServicePort.IntVal) == sp.Port { + spName = sp.Name + } + } + } + return formatEndpoints(endpoints, sets.NewString(spName)) +} + +func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings DescriberSettings) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%v\n", ing.Name) + fmt.Fprintf(out, "Namespace:\t%v\n", ing.Namespace) + fmt.Fprintf(out, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) + def := ing.Spec.Backend + ns := ing.Namespace + if def == nil { + // Ingresses that don't specify a default backend inherit the + // default backend in the kube-system namespace. 
+ def = &extensions.IngressBackend{ + ServiceName: "default-http-backend", + ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, + } + ns = api.NamespaceSystem + } + fmt.Fprintf(out, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def)) + if len(ing.Spec.TLS) != 0 { + describeIngressTLS(out, ing.Spec.TLS) + } + fmt.Fprint(out, "Rules:\n Host\tPath\tBackends\n") + fmt.Fprint(out, " ----\t----\t--------\n") + count := 0 + for _, rules := range ing.Spec.Rules { + if rules.HTTP == nil { + continue + } + count++ + host := rules.Host + if len(host) == 0 { + host = "*" + } + fmt.Fprintf(out, " %s\t\n", host) + for _, path := range rules.HTTP.Paths { + fmt.Fprintf(out, " \t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ns, &path.Backend)) + } + } + if count == 0 { + fmt.Fprintf(out, " %s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def)) + } + describeIngressAnnotations(out, ing.Annotations) + + if describerSettings.ShowEvents { + events, _ := i.Events(ing.Namespace).Search(ing) + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + +func describeIngressTLS(out io.Writer, ingTLS []extensions.IngressTLS) { + fmt.Fprintf(out, "TLS:\n") + for _, t := range ingTLS { + if t.SecretName == "" { + fmt.Fprintf(out, " SNI routes %v\n", strings.Join(t.Hosts, ",")) + } else { + fmt.Fprintf(out, " %v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) + } + } + return +} + +// TODO: Move from annotations into Ingress status. +func describeIngressAnnotations(out io.Writer, annotations map[string]string) { + fmt.Fprintf(out, "Annotations:\n") + for k, v := range annotations { + if !strings.HasPrefix(k, "ingress") { + continue + } + parts := strings.Split(k, "/") + name := parts[len(parts)-1] + fmt.Fprintf(out, " %v:\t%s\n", name, v) + } + return +} + +// ServiceDescriber generates information about a service. 
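// Sketch of direct use (illustrative; `c` is an assumed client.Interface
// implementation such as *client.Client):
//
//	sd := &ServiceDescriber{c}
//	out, err := sd.Describe("default", "frontend", DescriberSettings{ShowEvents: true})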
+type ServiceDescriber struct { + client.Interface +} + +func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.Services(namespace) + + service, err := c.Get(name) + if err != nil { + return "", err + } + + endpoints, _ := d.Endpoints(namespace).Get(name) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(service) + } + return describeService(service, endpoints, events) +} + +func buildIngressString(ingress []api.LoadBalancerIngress) string { + var buffer bytes.Buffer + + for i := range ingress { + if i != 0 { + buffer.WriteString(", ") + } + if ingress[i].IP != "" { + buffer.WriteString(ingress[i].IP) + } else { + buffer.WriteString(ingress[i].Hostname) + } + } + return buffer.String() +} + +func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) { + if endpoints == nil { + endpoints = &api.Endpoints{} + } + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", service.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", service.Namespace) + printLabelsMultiline(out, "Labels", service.Labels) + fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) + fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) + fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP) + if len(service.Status.LoadBalancer.Ingress) > 0 { + list := buildIngressString(service.Status.LoadBalancer.Ingress) + fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list) + } + for i := range service.Spec.Ports { + sp := &service.Spec.Ports[i] + + name := sp.Name + if name == "" { + name = "" + } + fmt.Fprintf(out, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) + if sp.NodePort != 0 { + fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) + } + fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) + } + fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// EndpointsDescriber generates information about an Endpoint. 
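// The underlying describeEndpoints helper below is a pure formatting function, so
// it can also be exercised directly, e.g. from a test (sketch only; the Endpoints
// literal is made up):
//
//	ep := &api.Endpoints{
//		ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: "default"},
//		Subsets: []api.EndpointSubset{{
//			Addresses: []api.EndpointAddress{{IP: "10.0.0.5"}},
//			Ports:     []api.EndpointPort{{Name: "http", Port: 80, Protocol: api.ProtocolTCP}},
//		}},
//	}
//	out, _ := describeEndpoints(ep, nil)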
+type EndpointsDescriber struct { + client.Interface +} + +func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.Endpoints(namespace) + + ep, err := c.Get(name) + if err != nil { + return "", err + } + + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Events(namespace).Search(ep) + } + + return describeEndpoints(ep, events) +} + +func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", ep.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", ep.Namespace) + printLabelsMultiline(out, "Labels", ep.Labels) + + fmt.Fprintf(out, "Subsets:\n") + for i := range ep.Subsets { + subset := &ep.Subsets[i] + + addresses := make([]string, 0, len(subset.Addresses)) + for _, addr := range subset.Addresses { + addresses = append(addresses, addr.IP) + } + addressesString := strings.Join(addresses, ",") + if len(addressesString) == 0 { + addressesString = "" + } + fmt.Fprintf(out, " Addresses:\t%s\n", addressesString) + + notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) + for _, addr := range subset.NotReadyAddresses { + notReadyAddresses = append(notReadyAddresses, addr.IP) + } + notReadyAddressesString := strings.Join(notReadyAddresses, ",") + if len(notReadyAddressesString) == 0 { + notReadyAddressesString = "" + } + fmt.Fprintf(out, " NotReadyAddresses:\t%s\n", notReadyAddressesString) + + if len(subset.Ports) > 0 { + fmt.Fprintf(out, " Ports:\n") + fmt.Fprintf(out, " Name\tPort\tProtocol\n") + fmt.Fprintf(out, " ----\t----\t--------\n") + for _, port := range subset.Ports { + name := port.Name + if len(name) == 0 { + name = "" + } + fmt.Fprintf(out, " %s\t%d\t%s\n", name, port.Port, port.Protocol) + } + } + fmt.Fprintf(out, "\n") + } + + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +// ServiceAccountDescriber generates information about a service. 
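// Usage sketch (illustrative; `c` is an assumed *client.Client). Besides the
// ServiceAccount itself, Describe lists the service-account token Secrets in the
// same namespace whose annotations point back at this account:
//
//	sad := &ServiceAccountDescriber{c}
//	out, err := sad.Describe("kube-system", "default", DescriberSettings{})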
+type ServiceAccountDescriber struct { + client.Interface +} + +func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.ServiceAccounts(namespace) + + serviceAccount, err := c.Get(name) + if err != nil { + return "", err + } + + tokens := []api.Secret{} + + tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) + options := api.ListOptions{FieldSelector: tokenSelector} + secrets, err := d.Secrets(namespace).List(options) + if err == nil { + for _, s := range secrets.Items { + name, _ := s.Annotations[api.ServiceAccountNameKey] + uid, _ := s.Annotations[api.ServiceAccountUIDKey] + if name == serviceAccount.Name && uid == string(serviceAccount.UID) { + tokens = append(tokens, s) + } + } + } + + return describeServiceAccount(serviceAccount, tokens) +} + +func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", serviceAccount.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", serviceAccount.Namespace) + printLabelsMultiline(out, "Labels", serviceAccount.Labels) + fmt.Fprintln(out) + + var ( + emptyHeader = " " + pullHeader = "Image pull secrets:" + mountHeader = "Mountable secrets: " + tokenHeader = "Tokens: " + + pullSecretNames = []string{} + mountSecretNames = []string{} + tokenSecretNames = []string{} + ) + + for _, s := range serviceAccount.ImagePullSecrets { + pullSecretNames = append(pullSecretNames, s.Name) + } + for _, s := range serviceAccount.Secrets { + mountSecretNames = append(mountSecretNames, s.Name) + } + for _, s := range tokens { + tokenSecretNames = append(tokenSecretNames, s.Name) + } + + types := map[string][]string{ + pullHeader: pullSecretNames, + mountHeader: mountSecretNames, + tokenHeader: tokenSecretNames, + } + for header, names := range types { + if len(names) == 0 { + fmt.Fprintf(out, "%s\t\n", header) + } else { + prefix := header + for _, name := range names { + fmt.Fprintf(out, "%s\t%s\n", prefix, name) + prefix = emptyHeader + } + } + fmt.Fprintln(out) + } + + return nil + }) +} + +// NodeDescriber generates information about a node. +type NodeDescriber struct { + client.Interface +} + +func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + mc := d.Nodes() + node, err := mc.Get(name) + if err != nil { + return "", err + } + + fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) + if err != nil { + return "", err + } + // in a policy aware setting, users may have access to a node, but not all pods + // in that case, we note that the user does not have access to the pods + canViewPods := true + nodeNonTerminatedPodsList, err := d.Pods(namespace).List(api.ListOptions{FieldSelector: fieldSelector}) + if err != nil { + if !errors.IsForbidden(err) { + return "", err + } + canViewPods = false + } + + var events *api.EventList + if describerSettings.ShowEvents { + if ref, err := api.GetReference(node); err != nil { + glog.Errorf("Unable to construct reference to '%#v': %v", node, err) + } else { + // TODO: We haven't decided the namespace for Node object yet. 
+ ref.UID = types.UID(ref.Name) + events, _ = d.Events("").Search(ref) + } + } + + return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) +} + +func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", node.Name) + printLabelsMultiline(out, "Labels", node.Labels) + printTaintsInAnnotationMultiline(out, "Taints", node.Annotations) + fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase) + if len(node.Status.Conditions) > 0 { + fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") + fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") + for _, c := range node.Status.Conditions { + fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", + c.Type, + c.Status, + c.LastHeartbeatTime.Time.Format(time.RFC1123Z), + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + addresses := make([]string, 0, len(node.Status.Addresses)) + for _, address := range node.Status.Addresses { + addresses = append(addresses, address.Address) + } + + printResourceList := func(resourceList api.ResourceList) { + resources := make([]api.ResourceName, 0, len(resourceList)) + for resource := range resourceList { + resources = append(resources, resource) + } + sort.Sort(SortableResourceNames(resources)) + for _, resource := range resources { + value := resourceList[resource] + fmt.Fprintf(out, " %s:\t%s\n", resource, value.String()) + } + } + + fmt.Fprintf(out, "Addresses:\t%s\n", strings.Join(addresses, ",")) + if len(node.Status.Capacity) > 0 { + fmt.Fprintf(out, "Capacity:\n") + printResourceList(node.Status.Capacity) + } + if len(node.Status.Allocatable) > 0 { + fmt.Fprintf(out, "Allocatable:\n") + printResourceList(node.Status.Allocatable) + } + + fmt.Fprintf(out, "System Info:\n") + fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) + fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) + fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) + fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) + fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) + fmt.Fprintf(out, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) + fmt.Fprintf(out, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) + fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) + fmt.Fprintf(out, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) + fmt.Fprintf(out, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) + + if len(node.Spec.PodCIDR) > 0 { + fmt.Fprintf(out, "PodCIDR:\t%s\n", node.Spec.PodCIDR) + } + if len(node.Spec.ExternalID) > 0 { + fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID) + } + if canViewPods && nodeNonTerminatedPodsList != nil { + if err := describeNodeResource(nodeNonTerminatedPodsList, node, out); err != nil { + return err + } + } else { + fmt.Fprintf(out, "Pods:\tnot authorized\n") + } + if events != nil { + DescribeEvents(events, out) + } + return nil + }) +} + +type PetSetDescriber struct { + client *client.Client +} + +func (p *PetSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + ps, err := 
p.client.Apps().PetSets(namespace).Get(name) + if err != nil { + return "", err + } + pc := p.client.Pods(namespace) + + selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return "", err + } + + running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) + if err != nil { + return "", err + } + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", ps.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", ps.Namespace) + fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&ps.Spec.Template.Spec)) + fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(ps.Spec.Selector)) + fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(ps.Labels)) + fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", ps.Status.Replicas, ps.Spec.Replicas) + fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(ps.Annotations)) + fmt.Fprintf(out, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) + describeVolumes(ps.Spec.Template.Spec.Volumes, out, "") + if describerSettings.ShowEvents { + events, _ := p.client.Events(namespace).Search(ps) + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + +// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. +type HorizontalPodAutoscalerDescriber struct { + client *client.Client +} + +func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name) + if err != nil { + return "", err + } + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", hpa.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", hpa.Namespace) + printLabelsMultiline(out, "Labels", hpa.Labels) + printLabelsMultiline(out, "Annotations", hpa.Annotations) + fmt.Fprintf(out, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, "Reference:\t%s/%s\n", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) + fmt.Fprintf(out, "Current CPU utilization:\t") + if hpa.Status.CurrentCPUUtilizationPercentage != nil { + fmt.Fprintf(out, "%d%%\n", *hpa.Status.CurrentCPUUtilizationPercentage) + } else { + fmt.Fprintf(out, "\n") + } + } + minReplicas := "" + if hpa.Spec.MinReplicas != nil { + minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) + } + fmt.Fprintf(out, "Min replicas:\t%s\n", minReplicas) + fmt.Fprintf(out, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) + + // TODO: switch to scale subresource once the required code is submitted. 
+ if strings.ToLower(hpa.Spec.ScaleTargetRef.Kind) == "replicationcontroller" { + fmt.Fprintf(out, "ReplicationController pods:\t") + rc, err := d.client.ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name) + if err == nil { + fmt.Fprintf(out, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas) + } else { + fmt.Fprintf(out, "failed to check Replication Controller\n") + } + } + + if describerSettings.ShowEvents { + events, _ := d.client.Events(namespace).Search(hpa) + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + +func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, out io.Writer) error { + fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) + fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") + fmt.Fprint(out, " ---------\t----\t\t------------\t----------\t---------------\t-------------\n") + allocatable := node.Status.Capacity + if len(node.Status.Allocatable) > 0 { + allocatable = node.Status.Allocatable + } + + for _, pod := range nodeNonTerminatedPodsList.Items { + req, limit, err := api.PodRequestsAndLimits(&pod) + if err != nil { + return err + } + cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] + fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 + fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 + fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, + cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) + } + + fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") + fmt.Fprint(out, " ------------\t----------\t---------------\t-------------\n") + reqs, limits, err := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) + if err != nil { + return err + } + cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] + fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 + fractionMemoryReqs := float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 + fractionMemoryLimits := float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 + fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", + cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits), + memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) + return nil +} + +func filterTerminatedPods(pods []*api.Pod) []*api.Pod { + if len(pods) == 0 { + return pods + } + result := []*api.Pod{} + for _, pod := range pods { + if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { + continue + } + result = append(result, pod) + } + return result +} + +func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { + reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} + for _, pod := range podList.Items { + podReqs, podLimits, err := api.PodRequestsAndLimits(&pod) + if err != nil { + return nil, nil, err + } + for podReqName, podReqValue := range podReqs { + if value, ok := reqs[podReqName]; !ok { + reqs[podReqName] = *podReqValue.Copy() + } else { + value.Add(podReqValue) + reqs[podReqName] = value + } + } + for podLimitName, podLimitValue := range podLimits { + if value, ok := limits[podLimitName]; !ok { + limits[podLimitName] = *podLimitValue.Copy() + } else { + value.Add(podLimitValue) + limits[podLimitName] = value + } + } + } + return +} + +func DescribeEvents(el *api.EventList, w io.Writer) { + if len(el.Items) == 0 { + fmt.Fprint(w, "No events.") + return + } + sort.Sort(SortableEvents(el.Items)) + fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tType\tReason\tMessage\n") + fmt.Fprint(w, " ---------\t--------\t-----\t----\t-------------\t--------\t------\t-------\n") + for _, e := range el.Items { + fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n", + translateTimestamp(e.FirstTimestamp), + translateTimestamp(e.LastTimestamp), + e.Count, + e.Source, + e.InvolvedObject.FieldPath, + e.Type, + e.Reason, + e.Message) + } +} + +// DeploymentDescriber generates information about a deployment. 
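An editorial aside, not part of the vendored file: a minimal caller-side sketch of how the deployment describer declared just below is typically driven. The clientset value cs, the namespace, and the name are illustrative assumptions; DescriberSettings is the settings struct whose ShowEvents field these Describe methods consult.

// Hypothetical usage sketch (same package); cs is assumed to be an already-configured clientset.
func describeOneDeployment(cs clientset.Interface, namespace, name string) (string, error) {
	dd := &DeploymentDescriber{cs}
	// ShowEvents appends related events to the tabbed output.
	return dd.Describe(namespace, name, DescriberSettings{ShowEvents: true})
}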
+type DeploymentDescriber struct { + clientset.Interface +} + +func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + d, err := dd.Extensions().Deployments(namespace).Get(name) + if err != nil { + return "", err + } + selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return "", err + } + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", d.ObjectMeta.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", d.ObjectMeta.Namespace) + fmt.Fprintf(out, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) + printLabelsMultiline(out, "Labels", d.Labels) + fmt.Fprintf(out, "Selector:\t%s\n", selector) + fmt.Fprintf(out, "Replicas:\t%d updated | %d total | %d available | %d unavailable\n", d.Status.UpdatedReplicas, d.Spec.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) + fmt.Fprintf(out, "StrategyType:\t%s\n", d.Spec.Strategy.Type) + fmt.Fprintf(out, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds) + if d.Spec.Strategy.RollingUpdate != nil { + ru := d.Spec.Strategy.RollingUpdate + fmt.Fprintf(out, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String()) + } + oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd) + if err == nil { + fmt.Fprintf(out, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) + var newRSs []*extensions.ReplicaSet + if newRS != nil { + newRSs = append(newRSs, newRS) + } + fmt.Fprintf(out, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) + } + if describerSettings.ShowEvents { + events, err := dd.Core().Events(namespace).Search(d) + if err == nil && events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + +// Get all daemon set whose selectors would match a given set of labels. +// TODO: Move this to pkg/client and ideally implement it server-side (instead +// of getting all DS's and searching through them manually). +// TODO: write an interface for controllers and fuse getReplicationControllersForLabels +// and getDaemonSetsForLabels. +func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.Labels) ([]extensions.DaemonSet, error) { + // Get all daemon sets + // TODO: this needs a namespace scope as argument + dss, err := c.List(api.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("error getting daemon set: %v", err) + } + + // Find the ones that match labelsToMatch. + var matchingDaemonSets []extensions.DaemonSet + for _, ds := range dss.Items { + selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + // this should never happen if the DaemonSet passed validation + return nil, err + } + if selector.Matches(labelsToMatch) { + matchingDaemonSets = append(matchingDaemonSets, ds) + } + } + return matchingDaemonSets, nil +} + +func printReplicationControllersByLabels(matchingRCs []*api.ReplicationController) string { + // Format the matching RC's into strings. + rcStrings := make([]string, 0, len(matchingRCs)) + for _, controller := range matchingRCs { + rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas)) + } + + list := strings.Join(rcStrings, ", ") + if list == "" { + return "" + } + return list +} + +func printReplicaSetsByLabels(matchingRSs []*extensions.ReplicaSet) string { + // Format the matching ReplicaSets into strings. 
+ rsStrings := make([]string, 0, len(matchingRSs)) + for _, rs := range matchingRSs { + rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, rs.Spec.Replicas)) + } + + list := strings.Join(rsStrings, ", ") + if list == "" { + return "" + } + return list +} + +func getPodStatusForController(c client.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) { + options := api.ListOptions{LabelSelector: selector} + rcPods, err := c.List(options) + if err != nil { + return + } + for _, pod := range rcPods.Items { + switch pod.Status.Phase { + case api.PodRunning: + running++ + case api.PodPending: + waiting++ + case api.PodSucceeded: + succeeded++ + case api.PodFailed: + failed++ + } + } + return +} + +// ConfigMapDescriber generates information about a ConfigMap +type ConfigMapDescriber struct { + client.Interface +} + +func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.ConfigMaps(namespace) + + configMap, err := c.Get(name) + if err != nil { + return "", err + } + + return describeConfigMap(configMap) +} + +func describeConfigMap(configMap *api.ConfigMap) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", configMap.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", configMap.Namespace) + printLabelsMultiline(out, "Labels", configMap.Labels) + printLabelsMultiline(out, "Annotations", configMap.Annotations) + + fmt.Fprintf(out, "\nData\n====\n") + for k, v := range configMap.Data { + fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) + } + + return nil + }) +} + +type ClusterDescriber struct { + fed_clientset.Interface +} + +func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + cluster, err := d.Federation().Clusters().Get(name) + if err != nil { + return "", err + } + return describeCluster(cluster) +} + +func describeCluster(cluster *federation.Cluster) (string, error) { + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", cluster.Name) + fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(cluster.Labels)) + + fmt.Fprintf(out, "ServerAddressByClientCIDRs:\n ClientCIDR\tServerAddress\n") + fmt.Fprintf(out, " ----\t----\n") + for _, cidrAddr := range cluster.Spec.ServerAddressByClientCIDRs { + fmt.Fprintf(out, " %v \t%v\n\n", cidrAddr.ClientCIDR, cidrAddr.ServerAddress) + } + + if len(cluster.Status.Conditions) > 0 { + fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastUpdateTime\tLastTransitionTime\tReason\tMessage\n") + fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") + for _, c := range cluster.Status.Conditions { + fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", + c.Type, + c.Status, + c.LastProbeTime.Time.Format(time.RFC1123Z), + c.LastTransitionTime.Time.Format(time.RFC1123Z), + c.Reason, + c.Message) + } + } + return nil + }) +} + +// NetworkPolicyDescriber generates information about a NetworkPolicy +type NetworkPolicyDescriber struct { + client.Interface +} + +func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + c := d.Extensions().NetworkPolicies(namespace) + + networkPolicy, err := c.Get(name) + if err != nil { + return "", err + } + + return describeNetworkPolicy(networkPolicy) +} + +func describeNetworkPolicy(networkPolicy *extensions.NetworkPolicy) (string, error) { + return 
tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", networkPolicy.Name) + fmt.Fprintf(out, "Namespace:\t%s\n", networkPolicy.Namespace) + printLabelsMultiline(out, "Labels", networkPolicy.Labels) + printLabelsMultiline(out, "Annotations", networkPolicy.Annotations) + + return nil + }) +} + +// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. +func newErrNoDescriber(types ...reflect.Type) error { + names := make([]string, 0, len(types)) + for _, t := range types { + names = append(names, t.String()) + } + return ErrNoDescriber{Types: names} +} + +// Describers implements ObjectDescriber against functions registered via Add. Those functions can +// be strongly typed. Types are exactly matched (no conversion or assignable checks). +type Describers struct { + searchFns map[reflect.Type][]typeFunc +} + +// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string, +// if at least one describer function has been registered with the exact types passed, or if any +// describer can print the exact object in its first argument (the remainder will be provided empty +// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will +// be returned +// TODO: reorder and partial match extra. +func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) { + exactType := reflect.TypeOf(exact) + fns, ok := d.searchFns[exactType] + if !ok { + return "", newErrNoDescriber(exactType) + } + if len(extra) == 0 { + for _, typeFn := range fns { + if len(typeFn.Extra) == 0 { + return typeFn.Describe(exact, extra...) + } + } + typeFn := fns[0] + for _, t := range typeFn.Extra { + v := reflect.New(t).Elem() + extra = append(extra, v.Interface()) + } + return fns[0].Describe(exact, extra...) + } + + types := make([]reflect.Type, 0, len(extra)) + for _, obj := range extra { + types = append(types, reflect.TypeOf(obj)) + } + for _, typeFn := range fns { + if typeFn.Matches(types) { + return typeFn.Describe(exact, extra...) + } + } + return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) +} + +// Add adds one or more describer functions to the Describer. The passed function must +// match the signature: +// +// func(...) (string, error) +// +// Any number of arguments may be provided. +func (d *Describers) Add(fns ...interface{}) error { + for _, fn := range fns { + fv := reflect.ValueOf(fn) + ft := fv.Type() + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + numIn := ft.NumIn() + if numIn == 0 { + return fmt.Errorf("expected at least one 'in' params, got: %v", ft) + } + if ft.NumOut() != 2 { + return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) + } + types := make([]reflect.Type, 0, numIn) + for i := 0; i < numIn; i++ { + types = append(types, ft.In(i)) + } + if ft.Out(0) != reflect.TypeOf(string("")) { + return fmt.Errorf("expected string return, got: %v", ft) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil. 
+ errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(1) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + + exact := types[0] + extra := types[1:] + if d.searchFns == nil { + d.searchFns = make(map[reflect.Type][]typeFunc) + } + fns := d.searchFns[exact] + fn := typeFunc{Extra: extra, Fn: fv} + fns = append(fns, fn) + d.searchFns[exact] = fns + } + return nil +} + +// typeFunc holds information about a describer function and the types it accepts +type typeFunc struct { + Extra []reflect.Type + Fn reflect.Value +} + +// Matches returns true when the passed types exactly match the Extra list. +func (fn typeFunc) Matches(types []reflect.Type) bool { + if len(fn.Extra) != len(types) { + return false + } + // reorder the items in array types and fn.Extra + // convert the type into string and sort them, check if they are matched + varMap := make(map[reflect.Type]bool) + for i := range fn.Extra { + varMap[fn.Extra[i]] = true + } + for i := range types { + if _, found := varMap[types[i]]; !found { + return false + } + } + return true +} + +// Describe invokes the nested function with the exact number of arguments. +func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) { + values := []reflect.Value{reflect.ValueOf(exact)} + for _, obj := range extra { + values = append(values, reflect.ValueOf(obj)) + } + out := fn.Fn.Call(values) + s := out[0].Interface().(string) + var err error + if !out[1].IsNil() { + err = out[1].Interface().(error) + } + return s, err +} + +// printLabelsMultiline prints multiple labels with a proper alignment. +func printLabelsMultiline(out io.Writer, title string, labels map[string]string) { + printLabelsMultilineWithIndent(out, "", title, "\t", labels) +} + +// printLabelsMultiline prints multiple labels with a user-defined alignment. +func printLabelsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, labels map[string]string) { + + fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) + + if labels == nil || len(labels) == 0 { + fmt.Fprintln(out, "") + return + } + + // to print labels in the sorted order + keys := make([]string, 0, len(labels)) + for key := range labels { + keys = append(keys, key) + } + sort.Strings(keys) + + for i, key := range keys { + if i != 0 { + fmt.Fprint(out, initialIndent) + fmt.Fprint(out, innerIndent) + } + fmt.Fprintf(out, "%s=%s\n", key, labels[key]) + i++ + } +} + +// printTaintsMultiline prints multiple taints with a proper alignment. +func printTaintsInAnnotationMultiline(out io.Writer, title string, annotations map[string]string) { + taints, err := api.GetTaintsFromNodeAnnotations(annotations) + if err != nil { + taints = []api.Taint{} + } + printTaintsMultilineWithIndent(out, "", title, "\t", taints) +} + +// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. 
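An editorial aside on the reflect detail used in Describers.Add above: the error interface type cannot be taken from a nil error value directly, but it can be recovered from the type of a *error. A minimal sketch using only the standard library:

// Hypothetical illustration (stdlib only, not part of the vendored file).
func exampleErrorInterfaceType() {
	var forErrorType error
	errorType := reflect.TypeOf(&forErrorType).Elem()
	fmt.Println(errorType)                                                 // error
	fmt.Println(reflect.TypeOf(fmt.Errorf("boom")).Implements(errorType))  // true
}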
+func printTaintsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, taints []api.Taint) { + fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) + + if taints == nil || len(taints) == 0 { + fmt.Fprintln(out, "") + return + } + + // to print taints in the sorted order + keys := make([]string, 0, len(taints)) + for _, taint := range taints { + keys = append(keys, taint.Key) + } + sort.Strings(keys) + + for i, key := range keys { + for _, taint := range taints { + if taint.Key == key { + if i != 0 { + fmt.Fprint(out, initialIndent) + fmt.Fprint(out, innerIndent) + } + fmt.Fprintf(out, "%s=%s:%s\n", taint.Key, taint.Value, taint.Effect) + i++ + } + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go new file mode 100644 index 00000000..cc34fba7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package kubectl is a set of libraries that are used by the kubectl command line tool. +// They are separated out into a library to support unit testing. Most functionality should +// be included in this package, and the main kubectl should really just be an entry point. +package kubectl diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go new file mode 100644 index 00000000..d8b9a147 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + "io" + "strings" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/kubernetes/pkg/api/meta" + apiutil "k8s.io/kubernetes/pkg/api/util" +) + +var allModels = make(map[string]*swagger.NamedModel) +var recursive = false // this is global for convenience, can become int for multiple levels + +// SplitAndParseResourceRequest separates the users input into a model and fields +func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (string, []string, error) { + inResource, fieldsPath := splitDotNotation(inResource) + inResource, _ = mapper.ResourceSingularizer(inResource) + return inResource, fieldsPath, nil +} + +// PrintModelDescription prints the description of a specific model or dot path +func PrintModelDescription(inModel string, fieldsPath []string, w io.Writer, swaggerSchema *swagger.ApiDeclaration, r bool) error { + recursive = r // this is global for convenience + apiVer := apiutil.GetVersion(swaggerSchema.ApiVersion) + "." + + var pointedModel *swagger.NamedModel + for i := range swaggerSchema.Models.List { + name := swaggerSchema.Models.List[i].Name + + allModels[name] = &swaggerSchema.Models.List[i] + if strings.ToLower(name) == strings.ToLower(apiVer+inModel) { + pointedModel = &swaggerSchema.Models.List[i] + } + } + if pointedModel == nil { + return fmt.Errorf("requested resource %q is not defined", inModel) + } + + if len(fieldsPath) == 0 { + return printTopLevelResourceInfo(w, pointedModel) + } + + var pointedModelAsProp *swagger.NamedModelProperty + for _, field := range fieldsPath { + if prop, nextModel, isModel := getField(pointedModel, field); prop != nil { + if isModel { + pointedModelAsProp = prop + pointedModel = allModels[nextModel] + } else { + return printPrimitive(w, prop) + } + } else { + return fmt.Errorf("field %q does not exist", field) + } + } + return printModelInfo(w, pointedModel, pointedModelAsProp) +} + +func splitDotNotation(model string) (string, []string) { + var fieldsPath []string + dotModel := strings.Split(model, ".") + if len(dotModel) >= 1 { + fieldsPath = dotModel[1:] + } + return dotModel[0], fieldsPath +} + +func getPointedModel(prop *swagger.ModelProperty) (string, bool) { + if prop.Ref != nil { + return *prop.Ref, true + } else if *prop.Type == "array" && prop.Items.Ref != nil { + return *prop.Items.Ref, true + } + return "", false +} + +func getField(model *swagger.NamedModel, sField string) (*swagger.NamedModelProperty, string, bool) { + for _, prop := range model.Model.Properties.List { + if prop.Name == sField { + pointedModel, isModel := getPointedModel(&prop.Property) + return &prop, pointedModel, isModel + } + } + return nil, "", false +} + +func printModelInfo(w io.Writer, model *swagger.NamedModel, modelProp *swagger.NamedModelProperty) error { + t, _ := getFieldType(&modelProp.Property) + fmt.Fprintf(w, "RESOURCE: %s <%s>\n\n", modelProp.Name, t) + fieldDesc, _ := wrapAndIndentText(modelProp.Property.Description, " ", 80) + fmt.Fprintf(w, "DESCRIPTION:\n%s\n\n%s\n", fieldDesc, indentText(model.Model.Description, " ")) + return printFields(w, model) +} + +func printPrimitive(w io.Writer, field *swagger.NamedModelProperty) error { + t, _ := getFieldType(&field.Property) + fmt.Fprintf(w, "FIELD: %s <%s>\n\n", field.Name, t) + d, _ := wrapAndIndentText(field.Property.Description, " ", 80) + fmt.Fprintf(w, "DESCRIPTION:\n%s\n", d) + return nil +} + +func printTopLevelResourceInfo(w io.Writer, model *swagger.NamedModel) error { + fmt.Fprintf(w, "DESCRIPTION:\n%s\n", 
model.Model.Description) + return printFields(w, model) +} + +func printFields(w io.Writer, model *swagger.NamedModel) error { + fmt.Fprint(w, "\nFIELDS:\n") + for _, field := range model.Model.Properties.List { + fieldType, err := getFieldType(&field.Property) + if err != nil { + return err + } + + if arrayContains(model.Model.Required, field.Name) { + fmt.Fprintf(w, " %s\t<%s> -required-\n", field.Name, fieldType) + } else { + fmt.Fprintf(w, " %s\t<%s>\n", field.Name, fieldType) + } + + if recursive { + pointedModel, isModel := getPointedModel(&field.Property) + if isModel { + for _, nestedField := range allModels[pointedModel].Model.Properties.List { + t, _ := getFieldType(&nestedField.Property) + fmt.Fprintf(w, " %s\t<%s>\n", nestedField.Name, t) + } + } + } else { + fieldDesc, _ := wrapAndIndentText(field.Property.Description, " ", 80) + fmt.Fprintf(w, "%s\n\n", fieldDesc) + } + } + fmt.Fprint(w, "\n") + return nil +} + +func getFieldType(prop *swagger.ModelProperty) (string, error) { + if prop.Type == nil { + return "Object", nil + } else if *prop.Type == "any" { + // Swagger Spec doesn't return information for maps. + return "map[string]string", nil + } else if *prop.Type == "array" { + if prop.Items == nil { + return "", fmt.Errorf("error in swagger spec. Property: %v contains an array without type", prop) + } + if prop.Items.Ref != nil { + fieldType := "[]Object" + return fieldType, nil + } + fieldType := "[]" + *prop.Items.Type + return fieldType, nil + } + return *prop.Type, nil +} + +func wrapAndIndentText(desc, indent string, lim int) (string, error) { + words := strings.Split(strings.Replace(strings.TrimSpace(desc), "\n", " ", -1), " ") + n := len(words) + + for i := 0; i < n; i++ { + if len(words[i]) > lim { + if strings.Contains(words[i], "/") { + s := breakURL(words[i]) + words = append(words[:i], append(s, words[i+1:]...)...) 
+ i = i + len(s) - 1 + } else { + fmt.Println(len(words[i])) + return "", fmt.Errorf("there are words longer that the break limit is") + } + } + } + + var lines []string + line := []string{indent} + lineL := len(indent) + for i := 0; i < len(words); i++ { + w := words[i] + + if strings.HasSuffix(w, "/") && lineL+len(w)-1 < lim { + prev := line[len(line)-1] + if strings.HasSuffix(prev, "/") { + if i+1 < len(words)-1 && !strings.HasSuffix(words[i+1], "/") { + w = strings.TrimSuffix(w, "/") + } + + line[len(line)-1] = prev + w + lineL += len(w) + } else { + line = append(line, w) + lineL += len(w) + 1 + } + } else if lineL+len(w) < lim { + line = append(line, w) + lineL += len(w) + 1 + } else { + lines = append(lines, strings.Join(line, " ")) + line = []string{indent, w} + lineL = len(indent) + len(w) + } + } + lines = append(lines, strings.Join(line, " ")) + + return strings.Join(lines, "\n"), nil +} + +func breakURL(url string) []string { + var buf []string + for _, part := range strings.Split(url, "/") { + buf = append(buf, part+"/") + } + return buf +} + +func indentText(text, indent string) string { + lines := strings.Split(text, "\n") + for i := range lines { + lines[i] = indent + lines[i] + } + return strings.Join(lines, "\n") +} + +func arrayContains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go new file mode 100644 index 00000000..ea254bcb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go @@ -0,0 +1,200 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" +) + +// GeneratorParam is a parameter for a generator +// TODO: facilitate structured json generator input schemes +type GeneratorParam struct { + Name string + Required bool +} + +// Generator is an interface for things that can generate API objects from input parameters. 
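An editorial aside, not part of the vendored file: the GeneratorParam type declared above drives the ValidateParams and IsZero helpers defined a little further down. A small sketch of how a required parameter passes or fails validation:

// Hypothetical illustration (same package): a required "name" parameter must be
// present and non-zero to satisfy ValidateParams.
func exampleValidateParams() error {
	spec := []GeneratorParam{{Name: "name", Required: true}}
	if err := ValidateParams(spec, map[string]interface{}{"name": "demo"}); err != nil {
		return err // not expected: "demo" is present and non-zero
	}
	// An empty string is a zero value, so this returns an aggregate
	// "Parameter: name is required" error.
	return ValidateParams(spec, map[string]interface{}{"name": ""})
}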
+type Generator interface { + // Generate creates an API object given a set of parameters + Generate(params map[string]interface{}) (runtime.Object, error) + // ParamNames returns the list of parameters that this generator uses + ParamNames() []GeneratorParam +} + +// StructuredGenerator is an interface for things that can generate API objects not using parameter injection +type StructuredGenerator interface { + // StructuredGenerator creates an API object using pre-configured parameters + StructuredGenerate() (runtime.Object, error) +} + +func IsZero(i interface{}) bool { + if i == nil { + return true + } + return reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) +} + +// ValidateParams ensures that all required params are present in the params map +func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error { + allErrs := []error{} + for ix := range paramSpec { + if paramSpec[ix].Required { + value, found := params[paramSpec[ix].Name] + if !found || IsZero(value) { + allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name)) + } + } + } + return utilerrors.NewAggregate(allErrs) +} + +// AnnotateFlags annotates all flags that are used by generators. +func AnnotateFlags(cmd *cobra.Command, generators map[string]Generator) { + // Iterate over all generators and mark any flags used by them. + for name, generator := range generators { + generatorParams := map[string]struct{}{} + for _, param := range generator.ParamNames() { + generatorParams[param.Name] = struct{}{} + } + + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + if _, found := generatorParams[flag.Name]; !found { + // This flag is not used by the current generator + // so skip it. + return + } + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + if annotations := flag.Annotations["generator"]; annotations == nil { + flag.Annotations["generator"] = []string{} + } + flag.Annotations["generator"] = append(flag.Annotations["generator"], name) + }) + } +} + +// EnsureFlagsValid ensures that no invalid flags are being used against a generator. +func EnsureFlagsValid(cmd *cobra.Command, generators map[string]Generator, generatorInUse string) error { + AnnotateFlags(cmd, generators) + + allErrs := []error{} + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + // If the flag hasn't changed, don't validate it. + if !flag.Changed { + return + } + // Look into the flag annotations for the generators that can use it. + if annotations := flag.Annotations["generator"]; len(annotations) > 0 { + annotationMap := map[string]struct{}{} + for _, ann := range annotations { + annotationMap[ann] = struct{}{} + } + // If the current generator is not annotated, then this flag shouldn't + // be used with it. 
+ if _, found := annotationMap[generatorInUse]; !found { + allErrs = append(allErrs, fmt.Errorf("cannot use --%s with --generator=%s", flag.Name, generatorInUse)) + } + } + }) + return utilerrors.NewAggregate(allErrs) +} + +// MakeParams is a utility that creates generator parameters from a command line +func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interface{} { + result := map[string]interface{}{} + for ix := range params { + f := cmd.Flags().Lookup(params[ix].Name) + if f != nil { + result[params[ix].Name] = f.Value.String() + } + } + return result +} + +func MakeProtocols(protocols map[string]string) string { + out := []string{} + for key, value := range protocols { + out = append(out, fmt.Sprintf("%s/%s", key, value)) + } + return strings.Join(out, ",") +} + +func ParseProtocols(protocols interface{}) (map[string]string, error) { + protocolsString, isString := protocols.(string) + if !isString { + return nil, fmt.Errorf("expected string, found %v", protocols) + } + if len(protocolsString) == 0 { + return nil, fmt.Errorf("no protocols passed") + } + portProtocolMap := map[string]string{} + protocolsSlice := strings.Split(protocolsString, ",") + for ix := range protocolsSlice { + portProtocol := strings.Split(protocolsSlice[ix], "/") + if len(portProtocol) != 2 { + return nil, fmt.Errorf("unexpected port protocol mapping: %s", protocolsSlice[ix]) + } + portProtocolMap[portProtocol[0]] = portProtocol[1] + } + return portProtocolMap, nil +} + +func MakeLabels(labels map[string]string) string { + out := []string{} + for key, value := range labels { + out = append(out, fmt.Sprintf("%s=%s", key, value)) + } + return strings.Join(out, ",") +} + +// ParseLabels turns a string representation of a label set into a map[string]string +func ParseLabels(labelSpec interface{}) (map[string]string, error) { + labelString, isString := labelSpec.(string) + if !isString { + return nil, fmt.Errorf("expected string, found %v", labelSpec) + } + if len(labelString) == 0 { + return nil, fmt.Errorf("no label spec passed") + } + labels := map[string]string{} + labelSpecs := strings.Split(labelString, ",") + for ix := range labelSpecs { + labelSpec := strings.Split(labelSpecs[ix], "=") + if len(labelSpec) != 2 { + return nil, fmt.Errorf("unexpected label spec: %s", labelSpecs[ix]) + } + labels[labelSpec[0]] = labelSpec[1] + } + return labels, nil +} + +func GetBool(params map[string]string, key string, defValue bool) (bool, error) { + if val, found := params[key]; !found { + return defValue, nil + } else { + return strconv.ParseBool(val) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go new file mode 100644 index 00000000..ec058040 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "bytes" + "fmt" + "io" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/runtime" + deploymentutil "k8s.io/kubernetes/pkg/util/deployment" + sliceutil "k8s.io/kubernetes/pkg/util/slice" +) + +const ( + ChangeCauseAnnotation = "kubernetes.io/change-cause" +) + +// HistoryViewer provides an interface for resources that have historical information. +type HistoryViewer interface { + ViewHistory(namespace, name string, revision int64) (string, error) +} + +func HistoryViewerFor(kind unversioned.GroupKind, c clientset.Interface) (HistoryViewer, error) { + switch kind { + case extensions.Kind("Deployment"): + return &DeploymentHistoryViewer{c}, nil + } + return nil, fmt.Errorf("no history viewer has been implemented for %q", kind) +} + +type DeploymentHistoryViewer struct { + c clientset.Interface +} + +// ViewHistory prints the revision history of a deployment +func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { + deployment, err := h.c.Extensions().Deployments(namespace).Get(name) + if err != nil { + return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) + } + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, h.c) + if err != nil { + return "", fmt.Errorf("failed to retrieve old replica sets from deployment %s: %v", name, err) + } + + historyInfo := make(map[int64]*api.PodTemplateSpec) + for _, rs := range append(allOldRSs, newRS) { + v, err := deploymentutil.Revision(rs) + if err != nil { + continue + } + historyInfo[v] = &rs.Spec.Template + changeCause := getChangeCause(rs) + if historyInfo[v].Annotations == nil { + historyInfo[v].Annotations = make(map[string]string) + } + if len(changeCause) > 0 { + historyInfo[v].Annotations[ChangeCauseAnnotation] = changeCause + } + } + + if len(historyInfo) == 0 { + return "No rollout history found.", nil + } + + if revision > 0 { + // Print details of a specific revision + template, ok := historyInfo[revision] + if !ok { + return "", fmt.Errorf("unable to find the specified revision") + } + buf := bytes.NewBuffer([]byte{}) + DescribePodTemplate(template, buf) + return buf.String(), nil + } + + // Sort the revisionToChangeCause map by revision + var revisions []int64 + for r := range historyInfo { + revisions = append(revisions, r) + } + sliceutil.SortInts64(revisions) + + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") + for _, r := range revisions { + // Find the change-cause of revision r + changeCause := historyInfo[r].Annotations[ChangeCauseAnnotation] + if len(changeCause) == 0 { + changeCause = "" + } + fmt.Fprintf(out, "%d\t%s\n", r, changeCause) + } + return nil + }) +} + +// getChangeCause returns the change-cause annotation of the input object +func getChangeCause(obj runtime.Object) string { + accessor, err := meta.Accessor(obj) + if err != nil { + return "" + } + return accessor.GetAnnotations()[ChangeCauseAnnotation] +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go new file mode 100644 index 00000000..8f1e6f19 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "k8s.io/kubernetes/pkg/api" + client "k8s.io/kubernetes/pkg/client/restclient" +) + +// RESTClient is a client helper for dealing with RESTful resources +// in a generic way. +type RESTClient interface { + Get() *client.Request + Post() *client.Request + Patch(api.PatchType) *client.Request + Delete() *client.Request + Put() *client.Request +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go new file mode 100644 index 00000000..7897a7b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. 
+package kubectl + +import ( + "errors" + "fmt" + "path" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" +) + +const ( + kubectlAnnotationPrefix = "kubectl.kubernetes.io/" + // TODO: auto-generate this + PossibleResourceTypes = `Possible resource types include (case insensitive): pods (po), services (svc), deployments, +replicasets (rs), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), +persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), namespaces (ns), +serviceaccounts (sa), ingresses (ing), horizontalpodautoscalers (hpa), daemonsets (ds), configmaps, +componentstatuses (cs), endpoints (ep), and secrets.` +) + +type NamespaceInfo struct { + Namespace string +} + +func listOfImages(spec *api.PodSpec) []string { + images := make([]string, 0, len(spec.Containers)) + for _, container := range spec.Containers { + images = append(images, container.Image) + } + return images +} + +func makeImageList(spec *api.PodSpec) string { + return strings.Join(listOfImages(spec), ",") +} + +func NewThirdPartyResourceMapper(gvs []unversioned.GroupVersion, gvks []unversioned.GroupVersionKind) (meta.RESTMapper, error) { + mapper := meta.NewDefaultRESTMapper(gvs, func(gv unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + for ix := range gvs { + if gvs[ix].Group == gv.Group && gvs[ix].Version == gv.Version { + return &meta.VersionInterfaces{ + ObjectConvertor: api.Scheme, + MetadataAccessor: meta.NewAccessor(), + }, nil + } + } + groupVersions := make([]string, 0, len(gvs)) + for ix := range gvs { + groupVersions = append(groupVersions, gvs[ix].String()) + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %s)", gv.String(), strings.Join(groupVersions, ", ")) + }) + for ix := range gvks { + mapper.Add(gvks[ix], meta.RESTScopeNamespace) + } + return mapper, nil +} + +// OutputVersionMapper is a RESTMapper that will prefer mappings that +// correspond to a preferred output version (if feasible) +type OutputVersionMapper struct { + meta.RESTMapper + + // output versions takes a list of preferred GroupVersions. Only the first + // hit for a given group will have effect. This allows different output versions + // depending upon the group of the kind being requested + OutputVersions []unversioned.GroupVersion +} + +// RESTMapping implements meta.RESTMapper by prepending the output version to the preferred version list. +func (m OutputVersionMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { + for _, preferredVersion := range m.OutputVersions { + if gk.Group == preferredVersion.Group { + mapping, err := m.RESTMapper.RESTMapping(gk, preferredVersion.Version) + if err == nil { + return mapping, nil + } + + break + } + } + + return m.RESTMapper.RESTMapping(gk, versions...) +} + +// ShortcutExpander is a RESTMapper that can be used for Kubernetes +// resources. 
It expands the resource first, then invokes the wrapped RESTMapper +type ShortcutExpander struct { + RESTMapper meta.RESTMapper +} + +var _ meta.RESTMapper = &ShortcutExpander{} + +func (e ShortcutExpander) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { + return e.RESTMapper.KindFor(expandResourceShortcut(resource)) +} + +func (e ShortcutExpander) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(expandResourceShortcut(resource)) +} + +func (e ShortcutExpander) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(expandResourceShortcut(resource)) +} + +func (e ShortcutExpander) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(expandResourceShortcut(resource)) +} + +func (e ShortcutExpander) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +func (e ShortcutExpander) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk) +} + +func (e ShortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) +} + +func (e ShortcutExpander) AliasesForResource(resource string) ([]string, bool) { + return e.RESTMapper.AliasesForResource(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) +} + +// shortForms is the list of short names to their expanded names +var shortForms = map[string]string{ + // Please keep this alphabetized + // If you add an entry here, please also take a look at pkg/kubectl/cmd/cmd.go + // and add an entry to valid_resources when appropriate. + "cs": "componentstatuses", + "ds": "daemonsets", + "ep": "endpoints", + "ev": "events", + "hpa": "horizontalpodautoscalers", + "ing": "ingresses", + "limits": "limitranges", + "no": "nodes", + "ns": "namespaces", + "po": "pods", + "psp": "podSecurityPolicies", + "pvc": "persistentvolumeclaims", + "pv": "persistentvolumes", + "quota": "resourcequotas", + "rc": "replicationcontrollers", + "rs": "replicasets", + "sa": "serviceaccounts", + "scc": "securitycontextconstraints", + "svc": "services", +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. Otherwise, will return resource unmodified. +func expandResourceShortcut(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { + if expanded, ok := shortForms[resource.Resource]; ok { + // don't change the group or version that's already been specified + resource.Resource = expanded + } + return resource +} + +// ResourceAliases returns the resource shortcuts and plural forms for the given resources. +func ResourceAliases(rs []string) []string { + as := make([]string, 0, len(rs)) + plurals := make(map[string]struct{}, len(rs)) + for _, r := range rs { + var plural string + switch { + case r == "endpoints": + plural = r // exception. "endpoint" does not exist. Why? 
+ case strings.HasSuffix(r, "y"): + plural = r[0:len(r)-1] + "ies" + case strings.HasSuffix(r, "s"): + plural = r + "es" + default: + plural = r + "s" + } + as = append(as, plural) + + plurals[plural] = struct{}{} + } + + for sf, r := range shortForms { + if _, found := plurals[r]; found { + as = append(as, sf) + } + } + return as +} + +// parseFileSource parses the source given. Acceptable formats include: +// +// 1. source-path: the basename will become the key name +// 2. source-name=source-path: the source-name will become the key name and source-path is the path to the key file +// +// Key names cannot include '='. +func parseFileSource(source string) (keyName, filePath string, err error) { + numSeparators := strings.Count(source, "=") + switch { + case numSeparators == 0: + return path.Base(source), source, nil + case numSeparators == 1 && strings.HasPrefix(source, "="): + return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) + case numSeparators == 1 && strings.HasSuffix(source, "="): + return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) + case numSeparators > 1: + return "", "", errors.New("Key names or file paths cannot contain '='.") + default: + components := strings.Split(source, "=") + return components[0], components[1], nil + } +} + +// parseLiteralSource parses the source key=val pair +func parseLiteralSource(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + + return items[0], items[1], nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go new file mode 100644 index 00000000..c6011d38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go @@ -0,0 +1,79 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +// NamespaceGeneratorV1 supports stable generation of a namespace +type NamespaceGeneratorV1 struct { + // Name of namespace + Name string +} + +// Ensure it supports the generator pattern that uses parameter injection +var _ Generator = &NamespaceGeneratorV1{} + +// Ensure it supports the generator pattern that uses parameters specified during construction +var _ StructuredGenerator = &NamespaceGeneratorV1{} + +// Generate returns a namespace using the specified parameters +func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + err := ValidateParams(g.ParamNames(), genericParams) + if err != nil { + return nil, err + } + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + delegate := &NamespaceGeneratorV1{Name: params["name"]} + return delegate.StructuredGenerate() +} + +// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern +func (g NamespaceGeneratorV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + } +} + +// StructuredGenerate outputs a namespace object using the configured fields +func (g *NamespaceGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := g.validate(); err != nil { + return nil, err + } + namespace := &api.Namespace{} + namespace.Name = g.Name + return namespace, nil +} + +// validate validates required fields are set to support structured generation +func (g *NamespaceGeneratorV1) validate() error { + if len(g.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go new file mode 100644 index 00000000..082b542f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util" +) + +const ( + DefaultHostAcceptRE = "^localhost$,^127\\.0\\.0\\.1$,^\\[::1\\]$" + DefaultPathAcceptRE = "^/.*" + DefaultPathRejectRE = "^/api/.*/exec,^/api/.*/run,^/api/.*/attach" + DefaultMethodRejectRE = "POST,PUT,PATCH" +) + +var ( + // The reverse proxy will periodically flush the io writer at this frequency. + // Only matters for long poll connections like the one used to watch. With an + // interval of 0 the reverse proxy will buffer content sent on any connection + // with transfer-encoding=chunked. 
+ // TODO: Flush after each chunk so the client doesn't suffer a 100ms latency per + // watch event. + ReverseProxyFlushInterval = 100 * time.Millisecond +) + +// FilterServer rejects requests which don't match one of the specified regular expressions +type FilterServer struct { + // Only paths that match this regexp will be accepted + AcceptPaths []*regexp.Regexp + // Paths that match this regexp will be rejected, even if they match the above + RejectPaths []*regexp.Regexp + // Hosts are required to match this list of regexp + AcceptHosts []*regexp.Regexp + // Methods that match this regexp are rejected + RejectMethods []*regexp.Regexp + // The delegate to call to handle accepted requests. + delegate http.Handler +} + +// Splits a comma separated list of regexps into a array of Regexp objects. +func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { + parts := strings.Split(str, ",") + result := make([]*regexp.Regexp, len(parts)) + for ix := range parts { + re, err := regexp.Compile(parts[ix]) + if err != nil { + return nil, err + } + result[ix] = re + } + return result, nil +} + +func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { + result, err := MakeRegexpArray(str) + if err != nil { + glog.Fatalf("Error compiling re: %v", err) + } + return result +} + +func matchesRegexp(str string, regexps []*regexp.Regexp) bool { + for _, re := range regexps { + if re.MatchString(str) { + glog.V(6).Infof("%v matched %s", str, re) + return true + } + } + return false +} + +func (f *FilterServer) accept(method, path, host string) bool { + if matchesRegexp(path, f.RejectPaths) { + glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) + return false + } + if matchesRegexp(method, f.RejectMethods) { + glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) + return false + } + if matchesRegexp(path, f.AcceptPaths) && matchesRegexp(host, f.AcceptHosts) { + glog.V(3).Infof("Filter accepting %v %v %v", method, path, host) + return true + } + glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) + return false +} + +// Make a copy of f which passes requests along to the new delegate. +func (f *FilterServer) HandlerFor(delegate http.Handler) *FilterServer { + f2 := *f + f2.delegate = delegate + return &f2 +} + +// Get host from a host header value like "localhost" or "localhost:8080" +func extractHost(header string) (host string) { + host, _, err := net.SplitHostPort(header) + if err != nil { + host = header + } + return host +} + +func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + host := extractHost(req.Host) + if f.accept(req.Method, req.URL.Path, host) { + f.delegate.ServeHTTP(rw, req) + return + } + rw.WriteHeader(http.StatusForbidden) + rw.Write([]byte("

<h3>Unauthorized</h3>
")) +} + +// ProxyServer is a http.Handler which proxies Kubernetes APIs to remote API server. +type ProxyServer struct { + handler http.Handler +} + +// NewProxyServer creates and installs a new ProxyServer. +// It automatically registers the created ProxyServer to http.DefaultServeMux. +// 'filter', if non-nil, protects requests to the api only. +func NewProxyServer(filebase string, apiProxyPrefix string, staticPrefix string, filter *FilterServer, cfg *restclient.Config) (*ProxyServer, error) { + host := cfg.Host + if !strings.HasSuffix(host, "/") { + host = host + "/" + } + target, err := url.Parse(host) + if err != nil { + return nil, err + } + proxy := newProxy(target) + if proxy.Transport, err = restclient.TransportFor(cfg); err != nil { + return nil, err + } + proxyServer := http.Handler(proxy) + if filter != nil { + proxyServer = filter.HandlerFor(proxyServer) + } + + if !strings.HasPrefix(apiProxyPrefix, "/api") { + proxyServer = stripLeaveSlash(apiProxyPrefix, proxyServer) + } + + mux := http.NewServeMux() + mux.Handle(apiProxyPrefix, proxyServer) + if filebase != "" { + // Require user to explicitly request this behavior rather than + // serving their working directory by default. + mux.Handle(staticPrefix, newFileHandler(staticPrefix, filebase)) + } + return &ProxyServer{handler: mux}, nil +} + +// Listen is a simple wrapper around net.Listen. +func (s *ProxyServer) Listen(address string, port int) (net.Listener, error) { + return net.Listen("tcp", fmt.Sprintf("%s:%d", address, port)) +} + +// ListenUnix does net.Listen for a unix socket +func (s *ProxyServer) ListenUnix(path string) (net.Listener, error) { + // Remove any socket, stale or not, but fall through for other files + fi, err := os.Stat(path) + if err == nil && (fi.Mode()&os.ModeSocket) != 0 { + os.Remove(path) + } + // Default to only user accessible socket, caller can open up later if desired + oldmask, _ := util.Umask(0077) + l, err := net.Listen("unix", path) + util.Umask(oldmask) + return l, err +} + +// Serve starts the server using given listener, loops forever. +func (s *ProxyServer) ServeOnListener(l net.Listener) error { + server := http.Server{ + Handler: s.handler, + } + return server.Serve(l) +} + +func newProxy(target *url.URL) *httputil.ReverseProxy { + director := func(req *http.Request) { + req.URL.Scheme = target.Scheme + req.URL.Host = target.Host + req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) + } + return &httputil.ReverseProxy{Director: director, FlushInterval: ReverseProxyFlushInterval} +} + +func newFileHandler(prefix, base string) http.Handler { + return http.StripPrefix(prefix, http.FileServer(http.Dir(base))) +} + +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + switch { + case aslash && bslash: + return a + b[1:] + case !aslash && !bslash: + return a + "/" + b + } + return a + b +} + +// like http.StripPrefix, but always leaves an initial slash. (so that our +// regexps will work.) 
+func stripLeaveSlash(prefix string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + p := strings.TrimPrefix(req.URL.Path, prefix) + if len(p) >= len(req.URL.Path) { + http.NotFound(w, req) + return + } + if len(p) > 0 && p[:1] != "/" { + p = "/" + p + } + req.URL.Path = p + h.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go new file mode 100644 index 00000000..ae19557f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go @@ -0,0 +1,747 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "io" + "net/url" + "os" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/sets" +) + +var FileExtensions = []string{".json", ".yaml", ".yml"} +var InputExtensions = append(FileExtensions, "stdin") + +const defaultHttpGetAttempts int = 3 + +// Builder provides convenience functions for taking arguments and parameters +// from the command line and converting them to a list of resources to iterate +// over using the Visitor interface. +type Builder struct { + mapper *Mapper + + errs []error + + paths []Visitor + stream bool + dir bool + + selector labels.Selector + selectAll bool + + resources []string + + namespace string + names []string + + resourceTuples []resourceTuple + + defaultNamespace bool + requireNamespace bool + + flatten bool + latest bool + + requireObject bool + + singleResourceType bool + continueOnError bool + + singular bool + + export bool + + schema validation.Schema +} + +type resourceTuple struct { + Resource string + Name string +} + +// NewBuilder creates a builder that operates on generic objects. +func NewBuilder(mapper meta.RESTMapper, typer runtime.ObjectTyper, clientMapper ClientMapper, decoder runtime.Decoder) *Builder { + return &Builder{ + mapper: &Mapper{typer, mapper, clientMapper, decoder}, + requireObject: true, + } +} + +func (b *Builder) Schema(schema validation.Schema) *Builder { + b.schema = schema + return b +} + +// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) +// If enforceNamespace is false, namespaces in the specs will be allowed to +// override the default namespace. If it is true, namespaces that don't match +// will cause an error. +// If ContinueOnError() is set prior to this method, objects on the path that are not +// recognized will be ignored (but logged at V(2)). 
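
Ahead of the FilenameParam implementation that follows, a hedged sketch of how these chained calls are typically combined. visitManifests is a hypothetical helper; the *resource.Builder is assumed to arrive pre-wired with its mapper, typer, client mapper and decoder:

package manifests

import "k8s.io/kubernetes/pkg/kubectl/resource"

// visitManifests feeds "-" (stdin), http(s) URLs, files and directories
// through the same Builder pipeline and visits every decoded object.
func visitManifests(b *resource.Builder, namespace string, paths []string) error {
	r := b.NamespaceParam(namespace).DefaultNamespace().
		FilenameParam(true /* enforceNamespace */, false /* recursive */, paths...).
		Flatten().
		Do()
	return r.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}
		// Each decoded document arrives as one Info with Namespace, Name and Object set.
		return nil
	})
}
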
+func (b *Builder) FilenameParam(enforceNamespace, recursive bool, paths ...string) *Builder { + for _, s := range paths { + switch { + case s == "-": + b.Stdin() + case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: + url, err := url.Parse(s) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) + continue + } + b.URL(defaultHttpGetAttempts, url) + default: + if !recursive { + b.singular = true + } + b.Path(recursive, s) + } + } + + if enforceNamespace { + b.RequireNamespace() + } + + return b +} + +// URL accepts a number of URLs directly. +func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { + for _, u := range urls { + b.paths = append(b.paths, &URLVisitor{ + URL: u, + StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + HttpAttemptCount: httpAttemptCount, + }) + } + return b +} + +// Stdin will read objects from the standard input. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). +func (b *Builder) Stdin() *Builder { + b.stream = true + b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) + return b +} + +// Stream will read objects from the provided reader, and if an error occurs will +// include the name string in the error message. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). +func (b *Builder) Stream(r io.Reader, name string) *Builder { + b.stream = true + b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) + return b +} + +// Path accepts a set of paths that may be files, directories (all can containing +// one or more resources). Creates a FileVisitor for each file and then each +// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set +// prior to this method being called, objects on the path that are unrecognized will be +// ignored (but logged at V(2)). +func (b *Builder) Path(recursive bool, paths ...string) *Builder { + for _, p := range paths { + _, err := os.Stat(p) + if os.IsNotExist(err) { + b.errs = append(b.errs, fmt.Errorf("the path %q does not exist", p)) + continue + } + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) + continue + } + + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) + } + if len(visitors) > 1 { + b.dir = true + } + + b.paths = append(b.paths, visitors...) + } + return b +} + +// ResourceTypes is a list of types of resources to operate on, when listing objects on +// the server or retrieving objects that match a selector. +func (b *Builder) ResourceTypes(types ...string) *Builder { + b.resources = append(b.resources, types...) 
+ return b +} + +// ResourceNames accepts a default type and one or more names, and creates tuples of +// resources +func (b *Builder) ResourceNames(resource string, names ...string) *Builder { + for _, name := range names { + // See if this input string is of type/name format + tuple, ok, err := splitResourceTypeName(name) + if err != nil { + b.errs = append(b.errs, err) + return b + } + + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + continue + } + + // Use the given default type to create a resource tuple + b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) + } + return b +} + +// SelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources invoke `b.Selector(labels.Everything)`. +func (b *Builder) SelectorParam(s string) *Builder { + selector, err := labels.Parse(s) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the provided selector %q is not valid: %v", s, err)) + return b + } + if selector.Empty() { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non empty selector %q with previously set 'all' parameter. ", s)) + return b + } + return b.Selector(selector) +} + +// Selector accepts a selector directly, and if non nil will trigger a list action. +func (b *Builder) Selector(selector labels.Selector) *Builder { + b.selector = selector + return b +} + +// ExportParam accepts the export boolean for these resources +func (b *Builder) ExportParam(export bool) *Builder { + b.export = export + return b +} + +// NamespaceParam accepts the namespace that these resources should be +// considered under from - used by DefaultNamespace() and RequireNamespace() +func (b *Builder) NamespaceParam(namespace string) *Builder { + b.namespace = namespace + return b +} + +// DefaultNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty. +func (b *Builder) DefaultNamespace() *Builder { + b.defaultNamespace = true + return b +} + +// AllNamespaces instructs the builder to use NamespaceAll as a namespace to request resources +// acroll all namespace. This overrides the namespace set by NamespaceParam(). +func (b *Builder) AllNamespaces(allNamespace bool) *Builder { + if allNamespace { + b.namespace = api.NamespaceAll + } + return b +} + +// RequireNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty, and if the value on the resource does not match +// NamespaceParam() an error will be returned. +func (b *Builder) RequireNamespace() *Builder { + b.requireNamespace = true + return b +} + +// SelectEverythingParam +func (b *Builder) SelectAllParam(selectAll bool) *Builder { + if selectAll && b.selector != nil { + b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) + return b + } + b.selectAll = selectAll + return b +} + +// ResourceTypeOrNameArgs indicates that the builder should accept arguments +// of the form `([,,...]| [,,...])`. When one argument is +// received, the types provided will be retrieved from the server (and be comma delimited). +// When two or more arguments are received, they must be a single type and resource name(s). +// The allowEmptySelector permits to select all the resources (via Everything func). 
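
A hedged fragment showing the argument shapes the next function accepts (b is an assumed *resource.Builder; each call is an independent example, not a sequence):

// A comma-separated type list followed by names is normalized into
// resource/name tuples: pods/web, pods/db, services/web, services/db.
b.ResourceTypeOrNameArgs(true, "pods,services", "web", "db")

// The slash form may also be given directly, one tuple per argument.
b.ResourceTypeOrNameArgs(true, "pods/web", "services/db")

// A bare type list with no names selects everything of those types when
// allowEmptySelector is true.
b.ResourceTypeOrNameArgs(true, "pods,services")
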
+func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { + args = normalizeMultipleResourcesArgs(args) + if ok, err := hasCombinedTypeArgs(args); ok { + if err != nil { + b.errs = append(b.errs, err) + return b + } + for _, s := range args { + tuple, ok, err := splitResourceTypeName(s) + if err != nil { + b.errs = append(b.errs, err) + return b + } + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + } + } + return b + } + if len(args) > 0 { + // Try replacing aliases only in types + args[0] = b.replaceAliases(args[0]) + } + switch { + case len(args) > 2: + b.names = append(b.names, args[1:]...) + b.ResourceTypes(SplitResourceArgument(args[0])...) + case len(args) == 2: + b.names = append(b.names, args[1]) + b.ResourceTypes(SplitResourceArgument(args[0])...) + case len(args) == 1: + b.ResourceTypes(SplitResourceArgument(args[0])...) + if b.selector == nil && allowEmptySelector { + b.selector = labels.Everything() + } + case len(args) == 0: + default: + b.errs = append(b.errs, fmt.Errorf("when passing arguments, must be resource or resource and name")) + } + return b +} + +// replaceAliases accepts an argument and tries to expand any existing +// aliases found in it +func (b *Builder) replaceAliases(input string) string { + replaced := []string{} + for _, arg := range strings.Split(input, ",") { + if aliases, ok := b.mapper.AliasesForResource(arg); ok { + arg = strings.Join(aliases, ",") + } + replaced = append(replaced, arg) + } + return strings.Join(replaced, ",") +} + +func hasCombinedTypeArgs(args []string) (bool, error) { + hasSlash := 0 + for _, s := range args { + if strings.Contains(s, "/") { + hasSlash++ + } + } + switch { + case hasSlash > 0 && hasSlash == len(args): + return true, nil + case hasSlash > 0 && hasSlash != len(args): + return true, fmt.Errorf("when passing arguments in resource/name form, all arguments must include the resource") + default: + return false, nil + } +} + +// Normalize args convert multiple resources to resource tuples, a,b,c d +// as a transform to a/d b/d c/d +func normalizeMultipleResourcesArgs(args []string) []string { + if len(args) >= 2 { + resources := []string{} + resources = append(resources, SplitResourceArgument(args[0])...) + if len(resources) > 1 { + names := []string{} + names = append(names, args[1:]...) + newArgs := []string{} + for _, resource := range resources { + for _, name := range names { + newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) + } + } + return newArgs + } + } + return args +} + +// splitResourceTypeName handles type/name resource formats and returns a resource tuple +// (empty or not), whether it successfully found one, and an error +func splitResourceTypeName(s string) (resourceTuple, bool, error) { + if !strings.Contains(s, "/") { + return resourceTuple{}, false, nil + } + seg := strings.Split(s, "/") + if len(seg) != 2 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") + } + resource, name := seg[0], seg[1] + if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") + } + return resourceTuple{Resource: resource, Name: name}, true, nil +} + +// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object +// compatible types into individual entries and give them their own items. 
The original object +// is not passed to any visitors. +func (b *Builder) Flatten() *Builder { + b.flatten = true + return b +} + +// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. +func (b *Builder) Latest() *Builder { + b.latest = true + return b +} + +// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. +func (b *Builder) RequireObject(require bool) *Builder { + b.requireObject = require + return b +} + +// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits +// return errors or some objects cannot be loaded. The default behavior is to terminate after +// the first error is returned from a VisitorFunc. +func (b *Builder) ContinueOnError() *Builder { + b.continueOnError = true + return b +} + +// SingleResourceType will cause the builder to error if the user specifies more than a single type +// of resource. +func (b *Builder) SingleResourceType() *Builder { + b.singleResourceType = true + return b +} + +// mappingFor returns the RESTMapping for the Kind referenced by the resource. +// prefers a fully specified GroupVersionResource match. If we don't have one match on GroupResource +func (b *Builder) mappingFor(resourceArg string) (*meta.RESTMapping, error) { + fullySpecifiedGVR, groupResource := unversioned.ParseResourceArg(resourceArg) + gvk := unversioned.GroupVersionKind{} + if fullySpecifiedGVR != nil { + gvk, _ = b.mapper.KindFor(*fullySpecifiedGVR) + } + if gvk.IsEmpty() { + var err error + gvk, err = b.mapper.KindFor(groupResource.WithVersion("")) + if err != nil { + return nil, err + } + } + + return b.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) +} + +func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { + if len(b.resources) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + mappings := []*meta.RESTMapping{} + for _, r := range b.resources { + mapping, err := b.mappingFor(r) + if err != nil { + return nil, err + } + + mappings = append(mappings, mapping) + } + return mappings, nil +} + +func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { + mappings := make(map[string]*meta.RESTMapping) + canonical := make(map[string]struct{}) + for _, r := range b.resourceTuples { + if _, ok := mappings[r.Resource]; ok { + continue + } + mapping, err := b.mappingFor(r.Resource) + if err != nil { + return nil, err + } + + mappings[mapping.Resource] = mapping + mappings[r.Resource] = mapping + canonical[mapping.Resource] = struct{}{} + } + if len(canonical) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + return mappings, nil +} + +func (b *Builder) visitorResult() *Result { + if len(b.errs) > 0 { + return &Result{err: utilerrors.NewAggregate(b.errs)} + } + + if b.selectAll { + b.selector = labels.Everything() + } + + // visit selectors + if b.selector != nil { + if len(b.names) != 0 { + return &Result{err: fmt.Errorf("name cannot be provided when a selector is specified")} + } + if len(b.resourceTuples) != 0 { + return &Result{err: fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")} + } + if len(b.resources) == 0 { + return &Result{err: fmt.Errorf("at least one resource must be specified to use a selector")} + } + // empty selector has different error message for paths being provided + if len(b.paths) != 0 { + if b.selector.Empty() { + 
return &Result{err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} + } else { + return &Result{err: fmt.Errorf("a selector may not be specified when path, URL, or stdin is provided as input")} + } + } + mappings, err := b.resourceMappings() + if err != nil { + return &Result{err: err} + } + + visitors := []Visitor{} + for _, mapping := range mappings { + client, err := b.mapper.ClientForMapping(mapping) + if err != nil { + return &Result{err: err} + } + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } + visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, b.selector, b.export)) + } + if b.continueOnError { + return &Result{visitor: EagerVisitorList(visitors), sources: visitors} + } + return &Result{visitor: VisitorList(visitors), sources: visitors} + } + + // visit items specified by resource and name + if len(b.resourceTuples) != 0 { + // if b.singular is false, this could be by default, so double-check length + // of resourceTuples to determine if in fact it is singular or not + isSingular := b.singular + if !isSingular { + isSingular = len(b.resourceTuples) == 1 + } + + if len(b.paths) != 0 { + return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} + } + if len(b.resources) != 0 { + return &Result{singular: isSingular, err: fmt.Errorf("you may not specify individual resources and bulk resources in the same call")} + } + + // retrieve one client for each resource + mappings, err := b.resourceTupleMappings() + if err != nil { + return &Result{singular: isSingular, err: err} + } + clients := make(map[string]RESTClient) + for _, mapping := range mappings { + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) + if _, ok := clients[s]; ok { + continue + } + client, err := b.mapper.ClientForMapping(mapping) + if err != nil { + return &Result{err: err} + } + clients[s] = client + } + + items := []Visitor{} + for _, tuple := range b.resourceTuples { + mapping, ok := mappings[tuple.Resource] + if !ok { + return &Result{singular: isSingular, err: fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)} + } + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) + client, ok := clients[s] + if !ok { + return &Result{singular: isSingular, err: fmt.Errorf("could not find a client for resource %q", tuple.Resource)} + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by name")} + } + } + + info := NewInfo(client, mapping, selectorNamespace, tuple.Name, b.export) + items = append(items, info) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(items) + } else { + visitors = VisitorList(items) + } + return &Result{singular: isSingular, visitor: visitors, sources: items} + } + + // visit items specified by name + if len(b.names) != 0 { + isSingular := len(b.names) == 1 + + if len(b.paths) != 0 { + return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} + } + if len(b.resources) == 0 { + return 
&Result{singular: isSingular, err: fmt.Errorf("you must provide a resource and a resource name together")} + } + if len(b.resources) > 1 { + return &Result{singular: isSingular, err: fmt.Errorf("you must specify only one resource")} + } + + mappings, err := b.resourceMappings() + if err != nil { + return &Result{singular: isSingular, err: err} + } + mapping := mappings[0] + + client, err := b.mapper.ClientForMapping(mapping) + if err != nil { + return &Result{err: err} + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by name")} + } + } + + visitors := []Visitor{} + for _, name := range b.names { + info := NewInfo(client, mapping, selectorNamespace, name, b.export) + visitors = append(visitors, info) + } + return &Result{singular: isSingular, visitor: VisitorList(visitors), sources: visitors} + } + + // visit items specified by paths + if len(b.paths) != 0 { + singular := !b.dir && !b.stream && len(b.paths) == 1 + if len(b.resources) != 0 { + return &Result{singular: singular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")} + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(b.paths) + } else { + visitors = VisitorList(b.paths) + } + + // only items from disk can be refetched + if b.latest { + // must flatten lists prior to fetching + if b.flatten { + visitors = NewFlattenListVisitor(visitors, b.mapper) + } + // must set namespace prior to fetching + if b.defaultNamespace { + visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) + } + visitors = NewDecoratedVisitor(visitors, RetrieveLatest) + } + return &Result{singular: singular, visitor: visitors, sources: b.paths} + } + + return &Result{err: fmt.Errorf("you must provide one or more resources by argument or filename (%s)", strings.Join(InputExtensions, "|"))} +} + +// Do returns a Result object with a Visitor for the resources identified by the Builder. +// The visitor will respect the error behavior specified by ContinueOnError. Note that stream +// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list +// for further iteration. +func (b *Builder) Do() *Result { + r := b.visitorResult() + if r.err != nil { + return r + } + if b.flatten { + r.visitor = NewFlattenListVisitor(r.visitor, b.mapper) + } + helpers := []VisitorFunc{} + if b.defaultNamespace { + helpers = append(helpers, SetNamespace(b.namespace)) + } + if b.requireNamespace { + helpers = append(helpers, RequireNamespace(b.namespace)) + } + helpers = append(helpers, FilterNamespace) + if b.requireObject { + helpers = append(helpers, RetrieveLazy) + } + r.visitor = NewDecoratedVisitor(r.visitor, helpers...) + if b.continueOnError { + r.visitor = ContinueOnErrorVisitor{r.visitor} + } + return r +} + +// SplitResourceArgument splits the argument with commas and returns unique +// strings in the original order. 
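
A hedged fragment illustrating the dedup behavior documented above, assuming the package is imported as resource:

out := resource.SplitResourceArgument("pods,services,pods")
// out == []string{"pods", "services"}: duplicates dropped, original order kept.
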
+func SplitResourceArgument(arg string) []string { + out := []string{} + set := sets.NewString() + for _, s := range strings.Split(arg, ",") { + if set.Has(s) { + continue + } + set.Insert(s) + out = append(out, s) + } + return out +} + +// HasNames returns true if the provided args contain resource names +func HasNames(args []string) (bool, error) { + args = normalizeMultipleResourcesArgs(args) + hasCombinedTypes, err := hasCombinedTypeArgs(args) + if err != nil { + return false, err + } + return hasCombinedTypes || len(args) > 1, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go new file mode 100644 index 00000000..05b35cfd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resource assists clients in dealing with RESTful objects that match the +// Kubernetes API conventions. The Helper object provides simple CRUD operations +// on resources. The Visitor interface makes it easy to deal with multiple resources +// in bulk for retrieval and operation. The Builder object simplifies converting +// standard command line arguments and parameters into a Visitor that can iterate +// over all of the identified resources, whether on the server or on the local +// filesystem. +package resource diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go new file mode 100644 index 00000000..849a6c04 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go @@ -0,0 +1,166 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "strconv" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// Helper provides methods for retrieving or mutating a RESTful +// resource. +type Helper struct { + // The name of this resource as the server would recognize it + Resource string + // A RESTClient capable of mutating this resource. + RESTClient RESTClient + // An interface for reading or writing the resource version of this + // type. 
+ Versioner runtime.ResourceVersioner + // True if the resource type is scoped to namespaces + NamespaceScoped bool +} + +// NewHelper creates a Helper from a ResourceMapping +func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { + return &Helper{ + Resource: mapping.Resource, + RESTClient: client, + Versioner: mapping.MetadataAccessor, + NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, + } +} + +func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name) + if export { + req.Param("export", strconv.FormatBool(export)) + } + return req.Do().Get() +} + +// TODO: add field selector +func (m *Helper) List(namespace, apiVersion string, selector labels.Selector, export bool) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + LabelsSelectorParam(selector) + if export { + req.Param("export", strconv.FormatBool(export)) + } + return req.Do().Get() +} + +func (m *Helper) Watch(namespace, resourceVersion, apiVersion string, labelSelector labels.Selector) (watch.Interface, error) { + return m.RESTClient.Get(). + Prefix("watch"). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Param("resourceVersion", resourceVersion). + LabelsSelectorParam(labelSelector). + Watch() +} + +func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { + return m.RESTClient.Get(). + Prefix("watch"). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Param("resourceVersion", resourceVersion). + Watch() +} + +func (m *Helper) Delete(namespace, name string) error { + return m.RESTClient.Delete(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Do(). + Error() +} + +func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { + if modify { + // Attempt to version the object based on client logic. + version, err := m.Versioner.ResourceVersion(obj) + if err != nil { + // We don't know how to clear the version on this object, so send it to the server as is + return m.createResource(m.RESTClient, m.Resource, namespace, obj) + } + if version != "" { + if err := m.Versioner.SetResourceVersion(obj, ""); err != nil { + return nil, err + } + } + } + + return m.createResource(m.RESTClient, m.Resource, namespace, obj) +} + +func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object) (runtime.Object, error) { + return c.Post().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Body(obj).Do().Get() +} +func (m *Helper) Patch(namespace, name string, pt api.PatchType, data []byte) (runtime.Object, error) { + return m.RESTClient.Patch(pt). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Body(data). + Do(). + Get() +} + +func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { + c := m.RESTClient + + // Attempt to version the object based on client logic. 
+ version, err := m.Versioner.ResourceVersion(obj) + if err != nil { + // We don't know how to version this object, so send it to the server as is + return m.replaceResource(c, m.Resource, namespace, name, obj) + } + if version == "" && overwrite { + // Retrieve the current version of the object to overwrite the server object + serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do().Get() + if err != nil { + // The object does not exist, but we want it to be created + return m.replaceResource(c, m.Resource, namespace, name, obj) + } + serverVersion, err := m.Versioner.ResourceVersion(serverObj) + if err != nil { + return nil, err + } + if err := m.Versioner.SetResourceVersion(obj, serverVersion); err != nil { + return nil, err + } + } + + return m.replaceResource(c, m.Resource, namespace, name, obj) +} + +func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object) (runtime.Object, error) { + return c.Put().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Name(name).Body(obj).Do().Get() +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go new file mode 100644 index 00000000..2639a61e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + client "k8s.io/kubernetes/pkg/client/restclient" +) + +// RESTClient is a client helper for dealing with RESTful resources +// in a generic way. +type RESTClient interface { + Get() *client.Request + Post() *client.Request + Patch(api.PatchType) *client.Request + Delete() *client.Request + Put() *client.Request +} + +// ClientMapper abstracts retrieving a Client for mapped objects. +type ClientMapper interface { + ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) +} + +// ClientMapperFunc implements ClientMapper for a function +type ClientMapperFunc func(mapping *meta.RESTMapping) (RESTClient, error) + +// ClientForMapping implements ClientMapper +func (f ClientMapperFunc) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { + return f(mapping) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go new file mode 100644 index 00000000..1ca922a0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go @@ -0,0 +1,167 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + "k8s.io/kubernetes/pkg/runtime" +) + +// DisabledClientForMapping allows callers to avoid allowing remote calls when handling +// resources. +type DisabledClientForMapping struct { + ClientMapper +} + +func (f DisabledClientForMapping) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { + return nil, nil +} + +// Mapper is a convenience struct for holding references to the three interfaces +// needed to create Info for arbitrary objects. +type Mapper struct { + runtime.ObjectTyper + meta.RESTMapper + ClientMapper + runtime.Decoder +} + +// InfoForData creates an Info object for the given data. An error is returned +// if any of the decoding or client lookup steps fail. Name and namespace will be +// set into Info if the mapping's MetadataAccessor can retrieve them. +func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { + versions := &runtime.VersionedObjects{} + _, gvk, err := m.Decode(data, nil, versions) + if err != nil { + return nil, fmt.Errorf("unable to decode %q: %v", source, err) + } + var obj runtime.Object + var versioned runtime.Object + if isThirdParty, gvkOut, err := thirdpartyresourcedata.IsThirdPartyObject(data, gvk); err != nil { + return nil, err + } else if isThirdParty { + obj, err = runtime.Decode(thirdpartyresourcedata.NewDecoder(nil, gvkOut.Kind), data) + versioned = obj + gvk = gvkOut + } else { + obj, versioned = versions.Last(), versions.First() + } + if err != nil { + return nil, fmt.Errorf("unable to decode %q: %v [%v]", source, err, gvk) + } + mapping, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %q: %v", source, err) + } + + client, err := m.ClientForMapping(mapping) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + + name, _ := mapping.MetadataAccessor.Name(obj) + namespace, _ := mapping.MetadataAccessor.Namespace(obj) + resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) + + return &Info{ + Mapping: mapping, + Client: client, + Namespace: namespace, + Name: name, + Source: source, + VersionedObject: versioned, + Object: obj, + ResourceVersion: resourceVersion, + }, nil +} + +// InfoForObject creates an Info object for the given Object. An error is returned +// if the object cannot be introspected. Name and namespace will be set into Info +// if the mapping's MetadataAccessor can retrieve them. 
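
A hedged sketch of the Mapper in use, decoding raw manifest bytes into an Info. The typer, restMapper, clientFn, decoder and data values, and the fmt import, are assumed to come from the caller and are not shown:

m := &resource.Mapper{
	ObjectTyper:  typer,
	RESTMapper:   restMapper,
	ClientMapper: resource.ClientMapperFunc(clientFn),
	Decoder:      decoder,
}
info, err := m.InfoForData(data, "example.yaml")
if err != nil {
	return err
}
// info carries the mapping, a client for it, and the decoded object.
fmt.Printf("%s %s/%s\n", info.Mapping.Resource, info.Namespace, info.Name)
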
+func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []unversioned.GroupVersionKind) (*Info, error) { + groupVersionKinds, _, err := m.ObjectKinds(obj) + if err != nil { + return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) + } + + groupVersionKind := groupVersionKinds[0] + if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { + groupVersionKind = preferredObjectKind(groupVersionKinds, preferredGVKs) + } + + mapping, err := m.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %v: %v", groupVersionKind, err) + } + client, err := m.ClientForMapping(mapping) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + name, _ := mapping.MetadataAccessor.Name(obj) + namespace, _ := mapping.MetadataAccessor.Namespace(obj) + resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) + return &Info{ + Mapping: mapping, + Client: client, + Namespace: namespace, + Name: name, + + Object: obj, + ResourceVersion: resourceVersion, + }, nil +} + +// preferredObjectKind picks the possibility that most closely matches the priority list in this order: +// GroupVersionKind matches (exact match) +// GroupKind matches +// Group matches +func preferredObjectKind(possibilities []unversioned.GroupVersionKind, preferences []unversioned.GroupVersionKind) unversioned.GroupVersionKind { + // Exact match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility == priority { + return possibility + } + } + } + + // GroupKind match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.GroupKind() == priority.GroupKind() { + return possibility + } + } + } + + // Group match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.Group == priority.Group { + return possibility + } + } + } + + // Just pick the first + return possibilities[0] +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go new file mode 100644 index 00000000..562fc0cc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go @@ -0,0 +1,291 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/watch" +) + +// ErrMatchFunc can be used to filter errors that may not be true failures. 
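
Ahead of the Result type that follows, a hedged fragment showing one way an ErrMatchFunc is typically supplied. Here errors is assumed to be k8s.io/kubernetes/pkg/api/errors, whose IsNotFound already has the func(error) bool shape:

r := b.ContinueOnError().Do().IgnoreErrors(errors.IsNotFound)
if err := r.Visit(func(info *resource.Info, _ error) error {
	// Only items that actually loaded reach this callback.
	return nil
}); err != nil {
	// Anything left here is a real failure; NotFound has been filtered out.
	return err
}
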
+type ErrMatchFunc func(error) bool + +// Result contains helper methods for dealing with the outcome of a Builder. +type Result struct { + err error + visitor Visitor + + sources []Visitor + singular bool + + ignoreErrors []utilerrors.Matcher + + // populated by a call to Infos + info []*Info +} + +// IgnoreErrors will filter errors that occur when by visiting the result +// (but not errors that occur by creating the result in the first place), +// eliminating any that match fns. This is best used in combination with +// Builder.ContinueOnError(), where the visitors accumulate errors and return +// them after visiting as a slice of errors. If no errors remain after +// filtering, the various visitor methods on Result will return nil for +// err. +func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { + for _, fn := range fns { + r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) + } + return r +} + +// Err returns one or more errors (via a util.ErrorList) that occurred prior +// to visiting the elements in the visitor. To see all errors including those +// that occur during visitation, invoke Infos(). +func (r *Result) Err() error { + return r.err +} + +// Visit implements the Visitor interface on the items described in the Builder. +// Note that some visitor sources are not traversable more than once, or may +// return different results. If you wish to operate on the same set of resources +// multiple times, use the Infos() method. +func (r *Result) Visit(fn VisitorFunc) error { + if r.err != nil { + return r.err + } + err := r.visitor.Visit(fn) + return utilerrors.FilterOut(err, r.ignoreErrors...) +} + +// IntoSingular sets the provided boolean pointer to true if the Builder input +// reflected a single item, or multiple. +func (r *Result) IntoSingular(b *bool) *Result { + *b = r.singular + return r +} + +// Infos returns an array of all of the resource infos retrieved via traversal. +// Will attempt to traverse the entire set of visitors only once, and will return +// a cached list on subsequent calls. +func (r *Result) Infos() ([]*Info, error) { + if r.err != nil { + return nil, r.err + } + if r.info != nil { + return r.info, nil + } + + infos := []*Info{} + err := r.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + infos = append(infos, info) + return nil + }) + err = utilerrors.FilterOut(err, r.ignoreErrors...) + + r.info, r.err = infos, err + return infos, err +} + +// Object returns a single object representing the output of a single visit to all +// found resources. If the Builder was a singular context (expected to return a +// single resource by user input) and only a single resource was found, the resource +// will be returned as is. Otherwise, the returned resources will be part of an +// api.List. The ResourceVersion of the api.List will be set only if it is identical +// across all infos returned. 
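
A hedged fragment showing how Object() is commonly paired with IntoSingular (r is an assumed *resource.Result):

var singular bool
obj, err := r.IntoSingular(&singular).Object()
if err != nil {
	return err
}
// With a single match obj is that object itself; otherwise it is normally an
// *api.List whose ResourceVersion is set only when every Info agreed on one.
_ = obj // placeholder for whatever the caller does next
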
+func (r *Result) Object() (runtime.Object, error) { + infos, err := r.Infos() + if err != nil { + return nil, err + } + + versions := sets.String{} + objects := []runtime.Object{} + for _, info := range infos { + if info.Object != nil { + objects = append(objects, info.Object) + versions.Insert(info.ResourceVersion) + } + } + + if len(objects) == 1 { + if r.singular { + return objects[0], nil + } + // if the item is a list already, don't create another list + if meta.IsListType(objects[0]) { + return objects[0], nil + } + } + + version := "" + if len(versions) == 1 { + version = versions.List()[0] + } + return &api.List{ + ListMeta: unversioned.ListMeta{ + ResourceVersion: version, + }, + Items: objects, + }, err +} + +// ResourceMapping returns a single meta.RESTMapping representing the +// resources located by the builder, or an error if more than one +// mapping was found. +func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { + if r.err != nil { + return nil, r.err + } + mappings := map[string]*meta.RESTMapping{} + for i := range r.sources { + m, ok := r.sources[i].(ResourceMapping) + if !ok { + return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) + } + mapping := m.ResourceMapping() + mappings[mapping.Resource] = mapping + } + if len(mappings) != 1 { + return nil, fmt.Errorf("expected only a single resource type") + } + for _, mapping := range mappings { + return mapping, nil + } + return nil, nil +} + +// Watch retrieves changes that occur on the server to the specified resource. +// It currently supports watching a single source - if the resource source +// (selectors or pure types) can be watched, they will be, otherwise the list +// will be visited (equivalent to the Infos() call) and if there is a single +// resource present, it will be watched, otherwise an error will be returned. +func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { + if r.err != nil { + return nil, r.err + } + if len(r.sources) != 1 { + return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") + } + w, ok := r.sources[0].(Watchable) + if !ok { + info, err := r.Infos() + if err != nil { + return nil, err + } + if len(info) != 1 { + return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) + } + return info[0].Watch(resourceVersion) + } + return w.Watch(resourceVersion) +} + +// AsVersionedObject converts a list of infos into a single object - either a List containing +// the objects as children, or if only a single Object is present, as that object. The provided +// version will be preferred as the conversion target, but the Object's mapping version will be +// used if that version is not present. +func AsVersionedObject(infos []*Info, forceList bool, version unversioned.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { + objects, err := AsVersionedObjects(infos, version, encoder) + if err != nil { + return nil, err + } + + var object runtime.Object + if len(objects) == 1 && !forceList { + object = objects[0] + } else { + object = &api.List{Items: objects} + converted, err := tryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) + if err != nil { + return nil, err + } + object = converted + } + return object, nil +} + +// AsVersionedObjects converts a list of infos into versioned objects. 
The provided +// version will be preferred as the conversion target, but the Object's mapping version will be +// used if that version is not present. +func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { + objects := []runtime.Object{} + for _, info := range infos { + if info.Object == nil { + continue + } + + // TODO: use info.VersionedObject as the value? + switch obj := info.Object.(type) { + case *extensions.ThirdPartyResourceData: + objects = append(objects, &runtime.Unknown{Raw: obj.Data}) + continue + } + + // objects that are not part of api.Scheme must be converted to JSON + // TODO: convert to map[string]interface{}, attach to runtime.Unknown? + if !version.IsEmpty() { + if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { + // TODO: ideally this would encode to version, but we don't expose multiple codecs here. + data, err := runtime.Encode(encoder, info.Object) + if err != nil { + return nil, err + } + // TODO: Set ContentEncoding and ContentType. + objects = append(objects, &runtime.Unknown{Raw: data}) + continue + } + } + + converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) + if err != nil { + return nil, err + } + objects = append(objects, converted) + } + return objects, nil +} + +// tryConvert attempts to convert the given object to the provided versions in order. This function assumes +// the object is in internal version. +func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { + var last error + for _, version := range versions { + if version.IsEmpty() { + return object, nil + } + obj, err := convertor.ConvertToVersion(object, version) + if err != nil { + last = err + continue + } + return obj, nil + } + return nil, last +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go new file mode 100644 index 00000000..d29e119b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go @@ -0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// Selector is a Visitor for resources that match a label selector. +type Selector struct { + Client RESTClient + Mapping *meta.RESTMapping + Namespace string + Selector labels.Selector + Export bool +} + +// NewSelector creates a resource selector which hides details of getting items by their label selector. 
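
Ahead of the constructor below, a hedged fragment that lists by label through the Selector visitor. The client and mapping values are assumed to come from the Builder's Mapper, and labels is k8s.io/kubernetes/pkg/labels:

sel := resource.NewSelector(client, mapping, "default", labels.SelectorFromSet(labels.Set{"app": "web"}), false)
return sel.Visit(func(info *resource.Info, err error) error {
	if err != nil {
		return err
	}
	// One Info is delivered; its Object is the whole list returned by the server.
	return nil
})
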
+func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace string, selector labels.Selector, export bool) *Selector { + return &Selector{ + Client: client, + Mapping: mapping, + Namespace: namespace, + Selector: selector, + Export: export, + } +} + +// Visit implements Visitor +func (r *Selector) Visit(fn VisitorFunc) error { + list, err := NewHelper(r.Client, r.Mapping).List(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector, r.Export) + if err != nil { + if errors.IsBadRequest(err) || errors.IsNotFound(err) { + if se, ok := err.(*errors.StatusError); ok { + // modify the message without hiding this is an API error + if r.Selector.Empty() { + se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) + } else { + se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, se.ErrStatus.Message) + } + return se + } + if r.Selector.Empty() { + return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) + } else { + return fmt.Errorf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, err) + } + } + return err + } + accessor := r.Mapping.MetadataAccessor + resourceVersion, _ := accessor.ResourceVersion(list) + info := &Info{ + Client: r.Client, + Mapping: r.Mapping, + Namespace: r.Namespace, + + Object: list, + ResourceVersion: resourceVersion, + } + return fn(info, nil) +} + +func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, resourceVersion, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (r *Selector) ResourceMapping() *meta.RESTMapping { + return r.Mapping +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go new file mode 100644 index 00000000..2430b312 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go @@ -0,0 +1,643 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/yaml" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + constSTDINstr string = "STDIN" + stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" +) + +// Visitor lets clients walk a list of resources. +type Visitor interface { + Visit(VisitorFunc) error +} + +// VisitorFunc implements the Visitor interface for a matching function. 
+// If there was a problem walking a list of resources, the incoming error +// will describe the problem and the function can decide how to handle that error. +// A nil returned indicates to accept an error to continue loops even when errors happen. +// This is useful for ignoring certain kinds of errors or aggregating errors in some way. +type VisitorFunc func(*Info, error) error + +// Watchable describes a resource that can be watched for changes that occur on the server, +// beginning after the provided resource version. +type Watchable interface { + Watch(resourceVersion string) (watch.Interface, error) +} + +// ResourceMapping allows an object to return the resource mapping associated with +// the resource or resources it represents. +type ResourceMapping interface { + ResourceMapping() *meta.RESTMapping +} + +// Info contains temporary info to execute a REST call, or show the results +// of an already completed REST call. +type Info struct { + Client RESTClient + Mapping *meta.RESTMapping + Namespace string + Name string + + // Optional, Source is the filename or URL to template file (.json or .yaml), + // or stdin to use to handle the resource + Source string + // Optional, this is the provided object in a versioned type before defaulting + // and conversions into its corresponding internal type. This is useful for + // reflecting on user intent which may be lost after defaulting and conversions. + VersionedObject interface{} + // Optional, this is the most recent value returned by the server if available + runtime.Object + // Optional, this is the most recent resource version the server knows about for + // this type of resource. It may not match the resource version of the object, + // but if set it should be equal to or newer than the resource version of the + // object (however the server defines resource version). + ResourceVersion string + // Optional, should this resource be exported, stripped of cluster-specific and instance specific fields + Export bool +} + +// NewInfo returns a new info object +func NewInfo(client RESTClient, mapping *meta.RESTMapping, namespace, name string, export bool) *Info { + return &Info{ + Client: client, + Mapping: mapping, + Namespace: namespace, + Name: name, + Export: export, + } +} + +// Visit implements Visitor +func (i *Info) Visit(fn VisitorFunc) error { + return fn(i, nil) +} + +// Get retrieves the object from the Namespace and Name fields +func (i *Info) Get() (err error) { + obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export) + if err != nil { + return err + } + i.Object = obj + i.ResourceVersion, _ = i.Mapping.MetadataAccessor.ResourceVersion(obj) + return nil +} + +// Refresh updates the object with another object. If ignoreError is set +// the Object will be updated even if name, namespace, or resourceVersion +// attributes cannot be loaded from the object. 
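
Ahead of the implementation below, a hedged fragment of the usual Helper/Info round trip: push a replacement and refresh the Info from the server's response. Here info is an assumed *resource.Info whose Object has already been mutated by the caller:

helper := resource.NewHelper(info.Client, info.Mapping)
updated, err := helper.Replace(info.Namespace, info.Name, true /* overwrite */, info.Object)
if err != nil {
	return err
}
// ignoreError=true keeps whatever metadata can be read back rather than failing the refresh.
return info.Refresh(updated, true)
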
+func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { + name, err := i.Mapping.MetadataAccessor.Name(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Name = name + } + namespace, err := i.Mapping.MetadataAccessor.Namespace(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Namespace = namespace + } + version, err := i.Mapping.MetadataAccessor.ResourceVersion(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.ResourceVersion = version + } + i.Object = obj + return nil +} + +// Namespaced returns true if the object belongs to a namespace +func (i *Info) Namespaced() bool { + return i.Mapping != nil && i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace +} + +// Watch returns server changes to this object after it was retrieved. +func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (i *Info) ResourceMapping() *meta.RESTMapping { + return i.Mapping +} + +// VisitorList implements Visit for the sub visitors it contains. The first error +// returned from a child Visitor will terminate iteration. +type VisitorList []Visitor + +// Visit implements Visitor +func (l VisitorList) Visit(fn VisitorFunc) error { + for i := range l { + if err := l[i].Visit(fn); err != nil { + return err + } + } + return nil +} + +// EagerVisitorList implements Visit for the sub visitors it contains. All errors +// will be captured and returned at the end of iteration. +type EagerVisitorList []Visitor + +// Visit implements Visitor, and gathers errors that occur during processing until +// all sub visitors have been visited. +func (l EagerVisitorList) Visit(fn VisitorFunc) error { + errs := []error(nil) + for i := range l { + if err := l[i].Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }); err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) +} + +func ValidateSchema(data []byte, schema validation.Schema) error { + if schema == nil { + return nil + } + data, err := yaml.ToJSON(data) + if err != nil { + return fmt.Errorf("error converting to YAML: %v", err) + } + if err := schema.ValidateBytes(data); err != nil { + return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) + } + return nil +} + +// URLVisitor downloads the contents of a URL, and if successful, returns +// an info object representing the downloaded object. +type URLVisitor struct { + URL *url.URL + *StreamVisitor + HttpAttemptCount int +} + +func (v *URLVisitor) Visit(fn VisitorFunc) error { + body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) + if err != nil { + return err + } + defer body.Close() + v.StreamVisitor.Reader = body + return v.StreamVisitor.Visit(fn) +} + +// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. 
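
Since httpget exists for stubbing, a hedged sketch of how a test inside this package might drive the retry loop implemented below: one 500 response, then success. It assumes the standard bytes, io, io/ioutil and time imports and the test's *testing.T as t:

calls := 0
fakeGet := func(url string) (int, string, io.ReadCloser, error) {
	calls++
	if calls == 1 {
		return 500, "Internal Server Error", ioutil.NopCloser(bytes.NewReader(nil)), nil
	}
	return 200, "200 OK", ioutil.NopCloser(bytes.NewBufferString("{}")), nil
}
// With three attempts the first 500 is retried and the second call succeeds.
body, err := readHttpWithRetries(fakeGet, time.Millisecond, "http://example.com/list.json", 3)
if err != nil {
	t.Fatalf("unexpected error: %v", err)
}
defer body.Close()
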
+func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { + var err error + var body io.ReadCloser + if attempts <= 0 { + return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) + } + for i := 0; i < attempts; i++ { + var statusCode int + var status string + if i > 0 { + time.Sleep(duration) + } + + // Try to get the URL + statusCode, status, body, err = get(u) + + // Retry Errors + if err != nil { + continue + } + + // Error - Set the error condition from the StatusCode + if statusCode != 200 { + err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) + } + + if statusCode >= 500 && statusCode < 600 { + // Retry 500's + continue + } else { + // Don't retry other StatusCodes + break + } + } + return body, err +} + +// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. +type httpget func(url string) (int, string, io.ReadCloser, error) + +// httpgetImpl Implements a function to retrieve a url and return the results. +func httpgetImpl(url string) (int, string, io.ReadCloser, error) { + resp, err := http.Get(url) + if err != nil { + return 0, "", nil, err + } + return resp.StatusCode, resp.Status, resp.Body, nil +} + +// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function +// passed to Visit. An error will terminate the visit. +type DecoratedVisitor struct { + visitor Visitor + decorators []VisitorFunc +} + +// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before +// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info +// object or terminate early with an error. +func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { + if len(fn) == 0 { + return v + } + return DecoratedVisitor{v, fn} +} + +// Visit implements Visitor +func (v DecoratedVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for i := range v.decorators { + if err := v.decorators[i](info, nil); err != nil { + return err + } + } + return fn(info, nil) + }) +} + +// ContinueOnErrorVisitor visits each item and, if an error occurs on +// any individual item, returns an aggregate error after all items +// are visited. +type ContinueOnErrorVisitor struct { + Visitor +} + +// Visit returns nil if no error occurs during traversal, a regular +// error if one occurs, or if multiple errors occur, an aggregate +// error. If the provided visitor fails on any individual item it +// will not prevent the remaining items from being visited. An error +// returned by the visitor directly may still result in some items +// not being visited. +func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { + errs := []error{} + err := v.Visitor.Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + if len(errs) == 1 { + return errs[0] + } + return utilerrors.NewAggregate(errs) +} + +// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list +// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying +// that interface - into multiple Infos. 
An error on any sub item (for instance, if a List +// contains an object that does not have a registered client or resource) will terminate +// the visit. +// TODO: allow errors to be aggregated? +type FlattenListVisitor struct { + Visitor + *Mapper +} + +// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects +// into individual items and then visit them individually. +func NewFlattenListVisitor(v Visitor, mapper *Mapper) Visitor { + return FlattenListVisitor{v, mapper} +} + +func (v FlattenListVisitor) Visit(fn VisitorFunc) error { + return v.Visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return fn(info, nil) + } + items, err := meta.ExtractList(info.Object) + if err != nil { + return fn(info, nil) + } + if errs := runtime.DecodeList(items, struct { + runtime.ObjectTyper + runtime.Decoder + }{v.Mapper, v.Mapper.Decoder}); len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + + // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list + var preferredGVKs []unversioned.GroupVersionKind + if info.Mapping != nil && !info.Mapping.GroupVersionKind.IsEmpty() { + preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) + } + + for i := range items { + item, err := v.InfoForObject(items[i], preferredGVKs) + if err != nil { + return err + } + if len(info.ResourceVersion) != 0 { + item.ResourceVersion = info.ResourceVersion + } + if err := fn(item, nil); err != nil { + return err + } + } + return nil + }) +} + +func ignoreFile(path string, extensions []string) bool { + if len(extensions) == 0 { + return false + } + ext := filepath.Ext(path) + for _, s := range extensions { + if s == ext { + return false + } + } + return true +} + +// FileVisitorForSTDIN return a special FileVisitor just for STDIN +func FileVisitorForSTDIN(mapper *Mapper, schema validation.Schema) Visitor { + return &FileVisitor{ + Path: constSTDINstr, + StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), + } +} + +// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. +// After FileVisitors open the files, they will pass a io.Reader to a StreamVisitor to do the reading. (stdin +// is also taken care of). 
Paths argument also accepts a single file, and will return a single visitor +func ExpandPathsToFileVisitors(mapper *Mapper, paths string, recursive bool, extensions []string, schema validation.Schema) ([]Visitor, error) { + var visitors []Visitor + err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if fi.IsDir() { + if path != paths && !recursive { + return filepath.SkipDir + } + return nil + } + // Don't check extension if the filepath was passed explicitly + if path != paths && ignoreFile(path, extensions) { + return nil + } + + visitor := &FileVisitor{ + Path: path, + StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), + } + + visitors = append(visitors, visitor) + return nil + }) + + if err != nil { + return nil, err + } + return visitors, nil +} + +// FileVisitor is wrapping around a StreamVisitor, to handle open/close files +type FileVisitor struct { + Path string + *StreamVisitor +} + +// Visit in a FileVisitor is just taking care of opening/closing files +func (v *FileVisitor) Visit(fn VisitorFunc) error { + var f *os.File + if v.Path == constSTDINstr { + f = os.Stdin + } else { + var err error + if f, err = os.Open(v.Path); err != nil { + return err + } + } + defer f.Close() + v.StreamVisitor.Reader = f + + return v.StreamVisitor.Visit(fn) +} + +// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be +// visited once. +// TODO: depends on objects being in JSON format before being passed to decode - need to implement +// a stream decoder method on runtime.Codec to properly handle this. +type StreamVisitor struct { + io.Reader + *Mapper + + Source string + Schema validation.Schema +} + +// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. +func NewStreamVisitor(r io.Reader, mapper *Mapper, source string, schema validation.Schema) *StreamVisitor { + return &StreamVisitor{ + Reader: r, + Mapper: mapper, + Source: source, + Schema: schema, + } +} + +// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. +func (v *StreamVisitor) Visit(fn VisitorFunc) error { + d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + return nil + } + return err + } + // TODO: This needs to be able to handle object in other encodings and schemas. + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + if err := ValidateSchema(ext.Raw, v.Schema); err != nil { + return fmt.Errorf("error validating %q: %v", v.Source, err) + } + info, err := v.InfoForData(ext.Raw, v.Source) + if err != nil { + if fnErr := fn(info, err); fnErr != nil { + return fnErr + } + continue + } + if err := fn(info, nil); err != nil { + return err + } + } +} + +func UpdateObjectNamespace(info *Info, err error) error { + if err != nil { + return err + } + if info.Object != nil { + return info.Mapping.MetadataAccessor.SetNamespace(info.Object, info.Namespace) + } + return nil +} + +// FilterNamespace omits the namespace if the object is not namespace scoped +func FilterNamespace(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + info.Namespace = "" + UpdateObjectNamespace(info, nil) + } + return nil +} + +// SetNamespace ensures that every Info object visited will have a namespace +// set. 
If info.Object is set, it will be mutated as well. +func SetNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + } + return nil + } +} + +// RequireNamespace will either set a namespace if none is provided on the +// Info object, or if the namespace is set and does not match the provided +// value, returns an error. This is intended to guard against administrators +// accidentally operating on resources outside their namespace. +func RequireNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + return nil + } + if info.Namespace != namespace { + return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) + } + return nil + } +} + +// RetrieveLatest updates the Object on each Info by invoking a standard client +// Get. +func RetrieveLatest(info *Info, err error) error { + if err != nil { + return err + } + if meta.IsListType(info.Object) { + return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") + } + if len(info.Name) == 0 { + return nil + } + if info.Namespaced() && len(info.Namespace) == 0 { + return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) + } + obj, err := NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name, info.Export) + if err != nil { + return err + } + info.Object = obj + info.ResourceVersion, _ = info.Mapping.MetadataAccessor.ResourceVersion(obj) + return nil +} + +// RetrieveLazy updates the object if it has not been loaded yet. +func RetrieveLazy(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return info.Get() + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go new file mode 100644 index 00000000..dd01e65f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go @@ -0,0 +1,2214 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "sort" + "strings" + "text/tabwriter" + "text/template" + "time" + + "github.com/ghodss/yaml" + "github.com/golang/glog" + "k8s.io/kubernetes/federation/apis/federation" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/jsonpath" + "k8s.io/kubernetes/pkg/util/sets" +) + +const ( + tabwriterMinWidth = 10 + tabwriterWidth = 4 + tabwriterPadding = 3 + tabwriterPadChar = ' ' + tabwriterFlags = 0 + loadBalancerWidth = 16 +) + +// GetPrinter takes a format type, an optional format argument. It will return true +// if the format is generic (untyped), otherwise it will return false. The printer +// is agnostic to schema versions, so you must send arguments to PrintObj in the +// version you wish them to be shown using a VersionedPrinter (typically when +// generic is true). +func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { + var printer ResourcePrinter + switch format { + case "json": + printer = &JSONPrinter{} + case "yaml": + printer = &YAMLPrinter{} + case "name": + printer = &NamePrinter{ + // TODO: this is wrong, these should be provided as an argument to GetPrinter + Typer: api.Scheme, + Decoder: api.Codecs.UniversalDecoder(), + } + case "template", "go-template": + if len(formatArgument) == 0 { + return nil, false, fmt.Errorf("template format specified but no template given") + } + var err error + printer, err = NewTemplatePrinter([]byte(formatArgument)) + if err != nil { + return nil, false, fmt.Errorf("error parsing template %s, %v\n", formatArgument, err) + } + case "templatefile", "go-template-file": + if len(formatArgument) == 0 { + return nil, false, fmt.Errorf("templatefile format specified but no template file given") + } + data, err := ioutil.ReadFile(formatArgument) + if err != nil { + return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) + } + printer, err = NewTemplatePrinter(data) + if err != nil { + return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) + } + case "jsonpath": + if len(formatArgument) == 0 { + return nil, false, fmt.Errorf("jsonpath template format specified but no template given") + } + var err error + printer, err = NewJSONPathPrinter(formatArgument) + if err != nil { + return nil, false, fmt.Errorf("error parsing jsonpath %s, %v\n", formatArgument, err) + } + case "jsonpath-file": + if len(formatArgument) == 0 { + return nil, false, fmt.Errorf("jsonpath file format specified but no template file file given") + } + data, err := ioutil.ReadFile(formatArgument) + if err != nil { + return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) + } + printer, err = NewJSONPathPrinter(string(data)) + if err != nil { + return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) + } + case "custom-columns": + var err error + if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, api.Codecs.UniversalDecoder()); err != nil { + return nil, false, err + } + case "custom-columns-file": + file, err := os.Open(formatArgument) 
+ if err != nil { + return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) + } + if printer, err = NewCustomColumnsPrinterFromTemplate(file, api.Codecs.UniversalDecoder()); err != nil { + return nil, false, err + } + case "wide": + fallthrough + case "": + return nil, false, nil + default: + return nil, false, fmt.Errorf("output format %q not recognized", format) + } + return printer, true, nil +} + +// ResourcePrinter is an interface that knows how to print runtime objects. +type ResourcePrinter interface { + // Print receives a runtime object, formats it and prints it to a writer. + PrintObj(runtime.Object, io.Writer) error + HandledResources() []string +} + +// ResourcePrinterFunc is a function that can print objects +type ResourcePrinterFunc func(runtime.Object, io.Writer) error + +// PrintObj implements ResourcePrinter +func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { + return fn(obj, w) +} + +// TODO: implement HandledResources() +func (fn ResourcePrinterFunc) HandledResources() []string { + return []string{} +} + +// VersionedPrinter takes runtime objects and ensures they are converted to a given API version +// prior to being passed to a nested printer. +type VersionedPrinter struct { + printer ResourcePrinter + convertor runtime.ObjectConvertor + versions []unversioned.GroupVersion +} + +// NewVersionedPrinter wraps a printer to convert objects to a known API version prior to printing. +func NewVersionedPrinter(printer ResourcePrinter, convertor runtime.ObjectConvertor, versions ...unversioned.GroupVersion) ResourcePrinter { + return &VersionedPrinter{ + printer: printer, + convertor: convertor, + versions: versions, + } +} + +// PrintObj implements ResourcePrinter +func (p *VersionedPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if len(p.versions) == 0 { + return fmt.Errorf("no version specified, object cannot be converted") + } + converted, err := p.convertor.ConvertToVersion(obj, unversioned.GroupVersions(p.versions)) + if err != nil { + return err + } + return p.printer.PrintObj(converted, w) +} + +// TODO: implement HandledResources() +func (p *VersionedPrinter) HandledResources() []string { + return []string{} +} + +// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. +type NamePrinter struct { + Decoder runtime.Decoder + Typer runtime.ObjectTyper +} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object +// and print "resource/name" pair. If the object is a List, print all items in it. +func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if meta.IsListType(obj) { + items, err := meta.ExtractList(obj) + if err != nil { + return err + } + if errs := runtime.DecodeList(items, p.Decoder, runtime.UnstructuredJSONScheme); len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + for _, obj := range items { + if err := p.PrintObj(obj, w); err != nil { + return err + } + } + return nil + } + + // TODO: this is wrong, runtime.Unknown and runtime.Unstructured are not handled properly here. 
+ + name := "" + if acc, err := meta.Accessor(obj); err == nil { + if n := acc.GetName(); len(n) > 0 { + name = n + } + } + + if gvks, _, err := p.Typer.ObjectKinds(obj); err == nil { + // TODO: this is wrong, it assumes that meta knows about all Kinds - should take a RESTMapper + _, resource := meta.KindToResource(gvks[0]) + fmt.Fprintf(w, "%s/%s\n", resource.Resource, name) + } else { + fmt.Fprintf(w, "<unknown>/%s\n", name) + } + + return nil +} + +// TODO: implement HandledResources() +func (p *NamePrinter) HandledResources() []string { + return []string{} +} + +// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. +type JSONPrinter struct { +} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. +func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + switch obj := obj.(type) { + case *runtime.Unknown: + _, err := w.Write(obj.Raw) + return err + } + + data, err := json.Marshal(obj) + if err != nil { + return err + } + dst := bytes.Buffer{} + err = json.Indent(&dst, data, "", " ") + dst.WriteByte('\n') + _, err = w.Write(dst.Bytes()) + return err +} + +// TODO: implement HandledResources() +func (p *JSONPrinter) HandledResources() []string { + return []string{} +} + +// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. +// The input object is assumed to be in the internal version of an API and is converted +// to the given version first. +type YAMLPrinter struct { + version string + convertor runtime.ObjectConvertor +} + +// PrintObj prints the data as YAML. +func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + switch obj := obj.(type) { + case *runtime.Unknown: + data, err := yaml.JSONToYAML(obj.Raw) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + output, err := yaml.Marshal(obj) + if err != nil { + return err + } + _, err = fmt.Fprint(w, string(output)) + return err +} + +// TODO: implement HandledResources() +func (p *YAMLPrinter) HandledResources() []string { + return []string{} +} + +type handlerEntry struct { + columns []string + printFunc reflect.Value +} + +type PrintOptions struct { + NoHeaders bool + WithNamespace bool + Wide bool + ShowAll bool + ShowLabels bool + AbsoluteTimestamps bool + ColumnLabels []string +} + +// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide +// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers +// will only be printed if the object type changes. This makes it useful for printing items +// received from watches. +type HumanReadablePrinter struct { + handlerMap map[reflect.Type]*handlerEntry + options PrintOptions + lastType reflect.Type +} + +// NewHumanReadablePrinter creates a HumanReadablePrinter. +func NewHumanReadablePrinter(noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) *HumanReadablePrinter { + printer := &HumanReadablePrinter{ + handlerMap: make(map[reflect.Type]*handlerEntry), + options: PrintOptions{ + NoHeaders: noHeaders, + WithNamespace: withNamespace, + Wide: wide, + ShowAll: showAll, + ShowLabels: showLabels, + AbsoluteTimestamps: absoluteTimestamps, + ColumnLabels: columnLabels, + }, + } + printer.addDefaultHandlers() + return printer +} + +// Handler adds a print handler with a given set of columns to HumanReadablePrinter instance. +// See validatePrintHandlerFunc for required method signature.
+func (h *HumanReadablePrinter) Handler(columns []string, printFunc interface{}) error { + printFuncValue := reflect.ValueOf(printFunc) + if err := h.validatePrintHandlerFunc(printFuncValue); err != nil { + glog.Errorf("Unable to add print handler: %v", err) + return err + } + objType := printFuncValue.Type().In(0) + h.handlerMap[objType] = &handlerEntry{ + columns: columns, + printFunc: printFuncValue, + } + return nil +} + +// validatePrintHandlerFunc validates print handler signature. +// printFunc is the function that will be called to print an object. +// It must be of the following type: +// func printFunc(object ObjectType, w io.Writer, options PrintOptions) error +// where ObjectType is the type of the object that will be printed. +func (h *HumanReadablePrinter) validatePrintHandlerFunc(printFunc reflect.Value) error { + if printFunc.Kind() != reflect.Func { + return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) + } + funcType := printFunc.Type() + if funcType.NumIn() != 3 || funcType.NumOut() != 1 { + return fmt.Errorf("invalid print handler." + + "Must accept 3 parameters and return 1 value.") + } + if funcType.In(1) != reflect.TypeOf((*io.Writer)(nil)).Elem() || + funcType.In(2) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || + funcType.Out(0) != reflect.TypeOf((*error)(nil)).Elem() { + return fmt.Errorf("invalid print handler. The expected signature is: "+ + "func handler(obj %v, w io.Writer, options PrintOptions) error", funcType.In(0)) + } + return nil +} + +func (h *HumanReadablePrinter) HandledResources() []string { + keys := make([]string, 0) + + for k := range h.handlerMap { + // k.String looks like "*api.PodList" and we want just "pod" + api := strings.Split(k.String(), ".") + resource := api[len(api)-1] + if strings.HasSuffix(resource, "List") { + continue + } + resource = strings.ToLower(resource) + keys = append(keys, resource) + } + return keys +} + +// NOTE: When adding a new resource type here, please update the list +// pkg/kubectl/cmd/get.go to reflect the new resource type. 
+var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"} +var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"} +var replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} +var replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} +var jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"} +var serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"} +var ingressColumns = []string{"NAME", "HOSTS", "ADDRESS", "PORTS", "AGE"} +var petSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} +var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} +var nodeColumns = []string{"NAME", "STATUS", "AGE"} +var daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "NODE-SELECTOR", "AGE"} +var eventColumns = []string{"LASTSEEN", "FIRSTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"} +var limitRangeColumns = []string{"NAME", "AGE"} +var resourceQuotaColumns = []string{"NAME", "AGE"} +var namespaceColumns = []string{"NAME", "STATUS", "AGE"} +var secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"} +var serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"} +var persistentVolumeColumns = []string{"NAME", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON", "AGE"} +var persistentVolumeClaimColumns = []string{"NAME", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "AGE"} +var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} +var thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} +var roleColumns = []string{"NAME", "AGE"} +var roleBindingColumns = []string{"NAME", "AGE"} +var clusterRoleColumns = []string{"NAME", "AGE"} +var clusterRoleBindingColumns = []string{"NAME", "AGE"} + +// TODO: consider having 'KIND' for third party resource data +var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} +var horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGET", "CURRENT", "MINPODS", "MAXPODS", "AGE"} +var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. +var deploymentColumns = []string{"NAME", "DESIRED", "CURRENT", "UP-TO-DATE", "AVAILABLE", "AGE"} +var configMapColumns = []string{"NAME", "DATA", "AGE"} +var podSecurityPolicyColumns = []string{"NAME", "PRIV", "CAPS", "VOLUMEPLUGINS", "SELINUX", "RUNASUSER"} +var clusterColumns = []string{"NAME", "STATUS", "VERSION", "AGE"} +var networkPolicyColumns = []string{"NAME", "POD-SELECTOR", "AGE"} +var securityContextConstraintsColumns = []string{"NAME", "PRIV", "CAPS", "SELINUX", "RUNASUSER", "FSGROUP", "SUPGROUP", "PRIORITY", "READONLYROOTFS", "VOLUMES"} + +// addDefaultHandlers adds print handlers for default Kubernetes types. 
+func (h *HumanReadablePrinter) addDefaultHandlers() { + h.Handler(podColumns, printPod) + h.Handler(podColumns, printPodList) + h.Handler(podTemplateColumns, printPodTemplate) + h.Handler(podTemplateColumns, printPodTemplateList) + h.Handler(replicationControllerColumns, printReplicationController) + h.Handler(replicationControllerColumns, printReplicationControllerList) + h.Handler(replicaSetColumns, printReplicaSet) + h.Handler(replicaSetColumns, printReplicaSetList) + h.Handler(daemonSetColumns, printDaemonSet) + h.Handler(daemonSetColumns, printDaemonSetList) + h.Handler(jobColumns, printJob) + h.Handler(jobColumns, printJobList) + h.Handler(serviceColumns, printService) + h.Handler(serviceColumns, printServiceList) + h.Handler(ingressColumns, printIngress) + h.Handler(ingressColumns, printIngressList) + h.Handler(petSetColumns, printPetSet) + h.Handler(petSetColumns, printPetSetList) + h.Handler(endpointColumns, printEndpoints) + h.Handler(endpointColumns, printEndpointsList) + h.Handler(nodeColumns, printNode) + h.Handler(nodeColumns, printNodeList) + h.Handler(eventColumns, printEvent) + h.Handler(eventColumns, printEventList) + h.Handler(limitRangeColumns, printLimitRange) + h.Handler(limitRangeColumns, printLimitRangeList) + h.Handler(resourceQuotaColumns, printResourceQuota) + h.Handler(resourceQuotaColumns, printResourceQuotaList) + h.Handler(namespaceColumns, printNamespace) + h.Handler(namespaceColumns, printNamespaceList) + h.Handler(secretColumns, printSecret) + h.Handler(secretColumns, printSecretList) + h.Handler(serviceAccountColumns, printServiceAccount) + h.Handler(serviceAccountColumns, printServiceAccountList) + h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaim) + h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaimList) + h.Handler(persistentVolumeColumns, printPersistentVolume) + h.Handler(persistentVolumeColumns, printPersistentVolumeList) + h.Handler(componentStatusColumns, printComponentStatus) + h.Handler(componentStatusColumns, printComponentStatusList) + h.Handler(thirdPartyResourceColumns, printThirdPartyResource) + h.Handler(thirdPartyResourceColumns, printThirdPartyResourceList) + h.Handler(deploymentColumns, printDeployment) + h.Handler(deploymentColumns, printDeploymentList) + h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscaler) + h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscalerList) + h.Handler(configMapColumns, printConfigMap) + h.Handler(configMapColumns, printConfigMapList) + h.Handler(podSecurityPolicyColumns, printPodSecurityPolicy) + h.Handler(podSecurityPolicyColumns, printPodSecurityPolicyList) + h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceData) + h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceDataList) + h.Handler(clusterColumns, printCluster) + h.Handler(clusterColumns, printClusterList) + h.Handler(networkPolicyColumns, printNetworkPolicy) + h.Handler(networkPolicyColumns, printNetworkPolicyList) + h.Handler(roleColumns, printRole) + h.Handler(roleColumns, printRoleList) + h.Handler(roleBindingColumns, printRoleBinding) + h.Handler(roleBindingColumns, printRoleBindingList) + h.Handler(clusterRoleColumns, printClusterRole) + h.Handler(clusterRoleColumns, printClusterRoleList) + h.Handler(clusterRoleBindingColumns, printClusterRoleBinding) + h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList) + h.Handler(securityContextConstraintsColumns, printSecurityContextConstraints) + h.Handler(securityContextConstraintsColumns, 
printSecurityContextConstraintsList) +} + +func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { + _, err := fmt.Fprintf(w, "Unknown object: %s", string(data)) + return err +} + +func (h *HumanReadablePrinter) printHeader(columnNames []string, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { + return err + } + return nil +} + +// Pass ports=nil for all ports. +func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string { + if len(endpoints.Subsets) == 0 { + return "<none>" + } + list := []string{} + max := 3 + more := false + count := 0 + for i := range endpoints.Subsets { + ss := &endpoints.Subsets[i] + for i := range ss.Ports { + port := &ss.Ports[i] + if ports == nil || ports.Has(port.Name) { + for i := range ss.Addresses { + if len(list) == max { + more = true + } + addr := &ss.Addresses[i] + if !more { + list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) + } + count++ + } + } + } + } + ret := strings.Join(list, ",") + if more { + return fmt.Sprintf("%s + %d more...", ret, count-max) + } + return ret +} + +func shortHumanDuration(d time.Duration) string { + // Allow deviation no more than 2 seconds(excluded) to tolerate machine time + // inconsistence, it can be considered as almost now. + if seconds := int(d.Seconds()); seconds < -1 { + return fmt.Sprintf("<invalid>") + } else if seconds < 0 { + return fmt.Sprintf("0s") + } else if seconds < 60 { + return fmt.Sprintf("%ds", seconds) + } else if minutes := int(d.Minutes()); minutes < 60 { + return fmt.Sprintf("%dm", minutes) + } else if hours := int(d.Hours()); hours < 24 { + return fmt.Sprintf("%dh", hours) + } else if hours < 24*364 { + return fmt.Sprintf("%dd", hours/24) + } + return fmt.Sprintf("%dy", int(d.Hours()/24/365)) +} + +// translateTimestamp returns the elapsed time since timestamp in +// human-readable approximation.
+func translateTimestamp(timestamp unversioned.Time) string { + if timestamp.IsZero() { + return "<unknown>" + } + return shortHumanDuration(time.Now().Sub(timestamp.Time)) +} + +func printPod(pod *api.Pod, w io.Writer, options PrintOptions) error { + return printPodBase(pod, w, options) +} + +func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { + name := pod.Name + namespace := pod.Namespace + + restarts := 0 + totalContainers := len(pod.Spec.Containers) + readyContainers := 0 + + reason := string(pod.Status.Phase) + // if not printing all pods, skip terminated pods (default) + if !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { + return nil + } + if pod.Status.Reason != "" { + reason = pod.Status.Reason + } + + initializing := false + for i := range pod.Status.InitContainerStatuses { + container := pod.Status.InitContainerStatuses[i] + switch { + case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: + continue + case container.State.Terminated != nil: + // initialization is failed + if len(container.State.Terminated.Reason) == 0 { + if container.State.Terminated.Signal != 0 { + reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) + } else { + reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) + } + } else { + reason = "Init:" + container.State.Terminated.Reason + } + initializing = true + case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": + reason = "Init:" + container.State.Waiting.Reason + initializing = true + default: + reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) + initializing = true + } + break + } + if !initializing { + for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { + container := pod.Status.ContainerStatuses[i] + + restarts += int(container.RestartCount) + if container.State.Waiting != nil && container.State.Waiting.Reason != "" { + reason = container.State.Waiting.Reason + } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { + reason = container.State.Terminated.Reason + } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { + if container.State.Terminated.Signal != 0 { + reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) + } else { + reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) + } + } else if container.Ready && container.State.Running != nil { + readyContainers++ + } + } + } + if pod.DeletionTimestamp != nil { + reason = "Terminating" + } + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%d/%d\t%s\t%d\t%s", + name, + readyContainers, + totalContainers, + reason, + restarts, + translateTimestamp(pod.CreationTimestamp), + ); err != nil { + return err + } + + if options.Wide { + nodeName := pod.Spec.NodeName + podIP := pod.Status.PodIP + if podIP == "" { + podIP = "<none>" + } + if _, err := fmt.Fprintf(w, "\t%s\t%s", + podIP, + nodeName, + ); err != nil { + return err + } + } + + if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { + return err + } + + return nil +} + +func printPodList(podList *api.PodList, w io.Writer, options PrintOptions) error { + for _, pod := range
podList.Items { + if err := printPodBase(&pod, w, options); err != nil { + return err + } + } + return nil +} + +func printPodTemplate(pod *api.PodTemplate, w io.Writer, options PrintOptions) error { + name := pod.Name + namespace := pod.Namespace + + containers := pod.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s", name); err != nil { + return err + } + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(pod.Template.Labels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { + return err + } + + return nil +} + +func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options PrintOptions) error { + for _, pod := range podList.Items { + if err := printPodTemplate(&pod, w, options); err != nil { + return err + } + } + return nil +} + +// TODO(AdoHe): try to put wide output in a single method +func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error { + name := controller.Name + namespace := controller.Namespace + containers := controller.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + desiredReplicas := controller.Spec.Replicas + currentReplicas := controller.Status.Replicas + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", + name, + desiredReplicas, + currentReplicas, + translateTimestamp(controller.CreationTimestamp), + ); err != nil { + return err + } + + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(controller.Spec.Selector)); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(controller.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, controller.Labels)); err != nil { + return err + } + + return nil +} + +func printReplicationControllerList(list *api.ReplicationControllerList, w io.Writer, options PrintOptions) error { + for _, controller := range list.Items { + if err := printReplicationController(&controller, w, options); err != nil { + return err + } + } + return nil +} + +func printReplicaSet(rs *extensions.ReplicaSet, w io.Writer, options PrintOptions) error { + name := rs.Name + namespace := rs.Namespace + containers := rs.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + desiredReplicas := rs.Spec.Replicas + currentReplicas := rs.Status.Replicas + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", + name, + desiredReplicas, + currentReplicas, + translateTimestamp(rs.CreationTimestamp), + ); err != nil { + return err + } + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(rs.Spec.Selector)); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(rs.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, rs.Labels)); err != nil { + return 
err + } + + return nil +} + +func printReplicaSetList(list *extensions.ReplicaSetList, w io.Writer, options PrintOptions) error { + for _, rs := range list.Items { + if err := printReplicaSet(&rs, w, options); err != nil { + return err + } + } + return nil +} + +func printCluster(c *federation.Cluster, w io.Writer, options PrintOptions) error { + var statuses []string + for _, condition := range c.Status.Conditions { + if condition.Status == api.ConditionTrue { + statuses = append(statuses, string(condition.Type)) + } else { + statuses = append(statuses, "Not"+string(condition.Type)) + } + } + if len(statuses) == 0 { + statuses = append(statuses, "Unknown") + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", + c.Name, + strings.Join(statuses, ","), + translateTimestamp(c.CreationTimestamp), + ); err != nil { + return err + } + return nil +} +func printClusterList(list *federation.ClusterList, w io.Writer, options PrintOptions) error { + for _, rs := range list.Items { + if err := printCluster(&rs, w, options); err != nil { + return err + } + } + return nil +} + +func printJob(job *batch.Job, w io.Writer, options PrintOptions) error { + name := job.Name + namespace := job.Namespace + containers := job.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + selector, err := unversioned.LabelSelectorAsSelector(job.Spec.Selector) + if err != nil { + // this shouldn't happen if LabelSelector passed validation + return err + } + if job.Spec.Completions != nil { + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", + name, + *job.Spec.Completions, + job.Status.Succeeded, + translateTimestamp(job.CreationTimestamp), + ); err != nil { + return err + } + } else { + if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s", + name, + "<none>", + job.Status.Succeeded, + translateTimestamp(job.CreationTimestamp), + ); err != nil { + return err + } + } + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(job.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, job.Labels)); err != nil { + return err + } + + return nil +} + +func printJobList(list *batch.JobList, w io.Writer, options PrintOptions) error { + for _, job := range list.Items { + if err := printJob(&job, w, options); err != nil { + return err + } + } + return nil +} + +// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. +// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes. +func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string { + ingress := s.Ingress + result := []string{} + for i := range ingress { + if ingress[i].IP != "" { + result = append(result, ingress[i].IP) + } else if ingress[i].Hostname != "" { + result = append(result, ingress[i].Hostname) + } + } + r := strings.Join(result, ",") + if !wide && len(r) > loadBalancerWidth { + r = r[0:(loadBalancerWidth-3)] + "..."
+ } + return r +} + +func getServiceExternalIP(svc *api.Service, wide bool) string { + switch svc.Spec.Type { + case api.ServiceTypeClusterIP: + if len(svc.Spec.ExternalIPs) > 0 { + return strings.Join(svc.Spec.ExternalIPs, ",") + } + return "<none>" + case api.ServiceTypeNodePort: + if len(svc.Spec.ExternalIPs) > 0 { + return strings.Join(svc.Spec.ExternalIPs, ",") + } + return "<nodes>" + case api.ServiceTypeLoadBalancer: + lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer, wide) + if len(svc.Spec.ExternalIPs) > 0 { + result := append(strings.Split(lbIps, ","), svc.Spec.ExternalIPs...) + return strings.Join(result, ",") + } + if len(lbIps) > 0 { + return lbIps + } + return "<pending>" + } + return "<unknown>" +} + +func makePortString(ports []api.ServicePort) string { + pieces := make([]string, len(ports)) + for ix := range ports { + port := &ports[ix] + pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol) + } + return strings.Join(pieces, ",") +} + +func printService(svc *api.Service, w io.Writer, options PrintOptions) error { + name := svc.Name + namespace := svc.Namespace + + internalIP := svc.Spec.ClusterIP + externalIP := getServiceExternalIP(svc, options.Wide) + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", + name, + internalIP, + externalIP, + makePortString(svc.Spec.Ports), + translateTimestamp(svc.CreationTimestamp), + ); err != nil { + return err + } + if options.Wide { + if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(svc.Spec.Selector)); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(svc.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, svc.Labels)) + return err +} + +func printServiceList(list *api.ServiceList, w io.Writer, options PrintOptions) error { + for _, svc := range list.Items { + if err := printService(&svc, w, options); err != nil { + return err + } + } + return nil +} + +// backendStringer behaves just like a string interface and converts the given backend to a string.
+func backendStringer(backend *extensions.IngressBackend) string { + if backend == nil { + return "" + } + return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) +} + +func formatHosts(rules []extensions.IngressRule) string { + list := []string{} + max := 3 + more := false + for _, rule := range rules { + if len(list) == max { + more = true + } + if !more && len(rule.Host) != 0 { + list = append(list, rule.Host) + } + } + if len(list) == 0 { + return "*" + } + ret := strings.Join(list, ",") + if more { + return fmt.Sprintf("%s + %d more...", ret, len(rules)-max) + } + return ret +} + +func formatPorts(tls []extensions.IngressTLS) string { + if len(tls) != 0 { + return "80, 443" + } + return "80" +} + +func printIngress(ingress *extensions.Ingress, w io.Writer, options PrintOptions) error { + name := ingress.Name + namespace := ingress.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + if _, err := fmt.Fprintf(w, "%s\t%v\t%v\t%v\t%s", + name, + formatHosts(ingress.Spec.Rules), + loadBalancerStatusStringer(ingress.Status.LoadBalancer, options.Wide), + formatPorts(ingress.Spec.TLS), + translateTimestamp(ingress.CreationTimestamp), + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(ingress.Labels, options.ColumnLabels)); err != nil { + return err + } + + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ingress.Labels)); err != nil { + return err + } + return nil +} + +func printIngressList(ingressList *extensions.IngressList, w io.Writer, options PrintOptions) error { + for _, ingress := range ingressList.Items { + if err := printIngress(&ingress, w, options); err != nil { + return err + } + } + return nil +} + +func printPetSet(ps *apps.PetSet, w io.Writer, options PrintOptions) error { + name := ps.Name + namespace := ps.Namespace + containers := ps.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + desiredReplicas := ps.Spec.Replicas + currentReplicas := ps.Status.Replicas + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", + name, + desiredReplicas, + currentReplicas, + translateTimestamp(ps.CreationTimestamp), + ); err != nil { + return err + } + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(ps.Spec.Selector)); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(ps.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ps.Labels)); err != nil { + return err + } + + return nil +} + +func printPetSetList(petSetList *apps.PetSetList, w io.Writer, options PrintOptions) error { + for _, ps := range petSetList.Items { + if err := printPetSet(&ps, w, options); err != nil { + return err + } + } + return nil +} + +func printDaemonSet(ds *extensions.DaemonSet, w io.Writer, options PrintOptions) error { + name := ds.Name + namespace := ds.Namespace + + containers := ds.Spec.Template.Spec.Containers + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + desiredScheduled := ds.Status.DesiredNumberScheduled + currentScheduled := ds.Status.CurrentNumberScheduled + selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + // this shouldn't happen if LabelSelector passed 
validation + return err + } + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s\t%s", + name, + desiredScheduled, + currentScheduled, + labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector), + translateTimestamp(ds.CreationTimestamp), + ); err != nil { + return err + } + if options.Wide { + if err := layoutContainers(containers, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { + return err + } + } + if _, err := fmt.Fprint(w, AppendLabels(ds.Labels, options.ColumnLabels)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ds.Labels)); err != nil { + return err + } + + return nil +} + +func printDaemonSetList(list *extensions.DaemonSetList, w io.Writer, options PrintOptions) error { + for _, ds := range list.Items { + if err := printDaemonSet(&ds, w, options); err != nil { + return err + } + } + return nil +} + +func printEndpoints(endpoints *api.Endpoints, w io.Writer, options PrintOptions) error { + name := endpoints.Name + namespace := endpoints.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, formatEndpoints(endpoints, nil), translateTimestamp(endpoints.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(endpoints.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, endpoints.Labels)) + return err +} + +func printEndpointsList(list *api.EndpointsList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printEndpoints(&item, w, options); err != nil { + return err + } + } + return nil +} + +func printNamespace(item *api.Namespace, w io.Writer, options PrintOptions) error { + if options.WithNamespace { + return fmt.Errorf("namespace is not namespaced") + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s", item.Name, item.Status.Phase, translateTimestamp(item.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) + return err +} + +func printNamespaceList(list *api.NamespaceList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printNamespace(&item, w, options); err != nil { + return err + } + } + return nil +} + +func printSecret(item *api.Secret, w io.Writer, options PrintOptions) error { + name := item.Name + namespace := item.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%s", name, item.Type, len(item.Data), translateTimestamp(item.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) + return err +} + +func printSecretList(list *api.SecretList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printSecret(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func printServiceAccount(item *api.ServiceAccount, w io.Writer, options PrintOptions) error { + name := item.Name + namespace := item.Namespace + + if options.WithNamespace { + if _, err := 
fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%d\t%s", name, len(item.Secrets), translateTimestamp(item.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) + return err +} + +func printServiceAccountList(list *api.ServiceAccountList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printServiceAccount(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func printNode(node *api.Node, w io.Writer, options PrintOptions) error { + if options.WithNamespace { + return fmt.Errorf("node is not namespaced") + } + conditionMap := make(map[api.NodeConditionType]*api.NodeCondition) + NodeAllConditions := []api.NodeConditionType{api.NodeReady} + for i := range node.Status.Conditions { + cond := node.Status.Conditions[i] + conditionMap[cond.Type] = &cond + } + var status []string + for _, validCondition := range NodeAllConditions { + if condition, ok := conditionMap[validCondition]; ok { + if condition.Status == api.ConditionTrue { + status = append(status, string(condition.Type)) + } else { + status = append(status, "Not"+string(condition.Type)) + } + } + } + if len(status) == 0 { + status = append(status, "Unknown") + } + if node.Spec.Unschedulable { + status = append(status, "SchedulingDisabled") + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s", node.Name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil { + return err + } + // Display caller specify column labels first. + if _, err := fmt.Fprint(w, AppendLabels(node.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, node.Labels)) + return err +} + +func printNodeList(list *api.NodeList, w io.Writer, options PrintOptions) error { + for _, node := range list.Items { + if err := printNode(&node, w, options); err != nil { + return err + } + } + return nil +} + +func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, options PrintOptions) error { + if options.WithNamespace { + return fmt.Errorf("persistentVolume is not namespaced") + } + name := pv.Name + + claimRefUID := "" + if pv.Spec.ClaimRef != nil { + claimRefUID += pv.Spec.ClaimRef.Namespace + claimRefUID += "/" + claimRefUID += pv.Spec.ClaimRef.Name + } + + modesStr := api.GetAccessModesAsString(pv.Spec.AccessModes) + + aQty := pv.Spec.Capacity[api.ResourceStorage] + aSize := aQty.String() + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s", + name, + aSize, modesStr, + pv.Status.Phase, + claimRefUID, + pv.Status.Reason, + translateTimestamp(pv.CreationTimestamp), + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(pv.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pv.Labels)) + return err +} + +func printPersistentVolumeList(list *api.PersistentVolumeList, w io.Writer, options PrintOptions) error { + for _, pv := range list.Items { + if err := printPersistentVolume(&pv, w, options); err != nil { + return err + } + } + return nil +} + +func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, options PrintOptions) error { + name := pvc.Name + namespace := pvc.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err 
!= nil { + return err + } + } + + phase := pvc.Status.Phase + storage := pvc.Spec.Resources.Requests[api.ResourceStorage] + capacity := "" + accessModes := "" + if pvc.Spec.VolumeName != "" { + accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) + storage = pvc.Status.Capacity[api.ResourceStorage] + capacity = storage.String() + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, phase, pvc.Spec.VolumeName, capacity, accessModes, translateTimestamp(pvc.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(pvc.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pvc.Labels)) + return err +} + +func printPersistentVolumeClaimList(list *api.PersistentVolumeClaimList, w io.Writer, options PrintOptions) error { + for _, psd := range list.Items { + if err := printPersistentVolumeClaim(&psd, w, options); err != nil { + return err + } + } + return nil +} + +func printEvent(event *api.Event, w io.Writer, options PrintOptions) error { + namespace := event.Namespace + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + // While watching event, we should print absolute time. + var FirstTimestamp, LastTimestamp string + if options.AbsoluteTimestamps { + FirstTimestamp = event.FirstTimestamp.String() + LastTimestamp = event.LastTimestamp.String() + } else { + FirstTimestamp = translateTimestamp(event.FirstTimestamp) + LastTimestamp = translateTimestamp(event.LastTimestamp) + } + + if _, err := fmt.Fprintf( + w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", + LastTimestamp, + FirstTimestamp, + event.Count, + event.InvolvedObject.Name, + event.InvolvedObject.Kind, + event.InvolvedObject.FieldPath, + event.Type, + event.Reason, + event.Source, + event.Message, + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(event.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, event.Labels)) + return err +} + +// Sorts and prints the EventList in a human-friendly format. +func printEventList(list *api.EventList, w io.Writer, options PrintOptions) error { + sort.Sort(SortableEvents(list.Items)) + for i := range list.Items { + if err := printEvent(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printLimitRange(limitRange *api.LimitRange, w io.Writer, options PrintOptions) error { + return printObjectMeta(limitRange.ObjectMeta, w, options, true) +} + +// Prints the LimitRangeList in a human-friendly format. +func printLimitRangeList(list *api.LimitRangeList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printLimitRange(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +// printObjectMeta prints the object metadata of a given resource. 
+func printObjectMeta(meta api.ObjectMeta, w io.Writer, options PrintOptions, namespaced bool) error { + if namespaced && options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", meta.Namespace); err != nil { + return err + } + } + + if _, err := fmt.Fprintf( + w, "%s\t%s", + meta.Name, + translateTimestamp(meta.CreationTimestamp), + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(meta.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, meta.Labels)) + return err +} + +func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, options PrintOptions) error { + return printObjectMeta(resourceQuota.ObjectMeta, w, options, true) +} + +// Prints the ResourceQuotaList in a human-friendly format. +func printResourceQuotaList(list *api.ResourceQuotaList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printResourceQuota(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printRole(role *rbac.Role, w io.Writer, options PrintOptions) error { + return printObjectMeta(role.ObjectMeta, w, options, true) +} + +// Prints the Role in a human-friendly format. +func printRoleList(list *rbac.RoleList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printRole(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printRoleBinding(roleBinding *rbac.RoleBinding, w io.Writer, options PrintOptions) error { + return printObjectMeta(roleBinding.ObjectMeta, w, options, true) +} + +// Prints the RoleBinding in a human-friendly format. +func printRoleBindingList(list *rbac.RoleBindingList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printRoleBinding(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printClusterRole(clusterRole *rbac.ClusterRole, w io.Writer, options PrintOptions) error { + return printObjectMeta(clusterRole.ObjectMeta, w, options, false) +} + +// Prints the ClusterRole in a human-friendly format. +func printClusterRoleList(list *rbac.ClusterRoleList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printClusterRole(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printClusterRoleBinding(clusterRoleBinding *rbac.ClusterRoleBinding, w io.Writer, options PrintOptions) error { + return printObjectMeta(clusterRoleBinding.ObjectMeta, w, options, false) +} + +// Prints the ClusterRoleBinding in a human-friendly format. 
+func printClusterRoleBindingList(list *rbac.ClusterRoleBindingList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printClusterRoleBinding(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printComponentStatus(item *api.ComponentStatus, w io.Writer, options PrintOptions) error { + if options.WithNamespace { + return fmt.Errorf("componentStatus is not namespaced") + } + status := "Unknown" + message := "" + error := "" + for _, condition := range item.Conditions { + if condition.Type == api.ComponentHealthy { + if condition.Status == api.ConditionTrue { + status = "Healthy" + } else { + status = "Unhealthy" + } + message = condition.Message + error = condition.Error + break + } + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", item.Name, status, message, error); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) + return err +} + +func printComponentStatusList(list *api.ComponentStatusList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printComponentStatus(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func printThirdPartyResource(rsrc *extensions.ThirdPartyResource, w io.Writer, options PrintOptions) error { + versions := make([]string, len(rsrc.Versions)) + for ix := range rsrc.Versions { + version := &rsrc.Versions[ix] + versions[ix] = fmt.Sprintf("%s", version.Name) + } + versionsString := strings.Join(versions, ",") + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, rsrc.Description, versionsString); err != nil { + return err + } + return nil +} + +func printThirdPartyResourceList(list *extensions.ThirdPartyResourceList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printThirdPartyResource(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func truncate(str string, maxLen int) string { + if len(str) > maxLen { + return str[0:maxLen] + "..." 
+ } + return str +} + +func printThirdPartyResourceData(rsrc *extensions.ThirdPartyResourceData, w io.Writer, options PrintOptions) error { + l := labels.FormatLabels(rsrc.Labels) + truncateCols := 50 + if options.Wide { + truncateCols = 100 + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, l, truncate(string(rsrc.Data), truncateCols)); err != nil { + return err + } + return nil +} + +func printThirdPartyResourceDataList(list *extensions.ThirdPartyResourceDataList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printThirdPartyResourceData(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func printDeployment(deployment *extensions.Deployment, w io.Writer, options PrintOptions) error { + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", deployment.Namespace); err != nil { + return err + } + } + + desiredReplicas := deployment.Spec.Replicas + currentReplicas := deployment.Status.Replicas + updatedReplicas := deployment.Status.UpdatedReplicas + availableReplicas := deployment.Status.AvailableReplicas + age := translateTimestamp(deployment.CreationTimestamp) + if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%d\t%s", deployment.Name, desiredReplicas, currentReplicas, updatedReplicas, availableReplicas, age); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(deployment.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, deployment.Labels)) + return err +} + +func printDeploymentList(list *extensions.DeploymentList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printDeployment(&item, w, options); err != nil { + return err + } + } + return nil +} + +func printHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, w io.Writer, options PrintOptions) error { + namespace := hpa.Namespace + name := hpa.Name + reference := fmt.Sprintf("%s/%s", + hpa.Spec.ScaleTargetRef.Kind, + hpa.Spec.ScaleTargetRef.Name) + target := "" + if hpa.Spec.TargetCPUUtilizationPercentage != nil { + target = fmt.Sprintf("%d%%", *hpa.Spec.TargetCPUUtilizationPercentage) + } + current := "" + if hpa.Status.CurrentCPUUtilizationPercentage != nil { + current = fmt.Sprintf("%d%%", *hpa.Status.CurrentCPUUtilizationPercentage) + } + minPods := "" + if hpa.Spec.MinReplicas != nil { + minPods = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) + } + maxPods := hpa.Spec.MaxReplicas + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%d\t%s", + name, + reference, + target, + current, + minPods, + maxPods, + translateTimestamp(hpa.CreationTimestamp), + ); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(hpa.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, hpa.Labels)) + return err +} + +func printHorizontalPodAutoscalerList(list *autoscaling.HorizontalPodAutoscalerList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printHorizontalPodAutoscaler(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printConfigMap(configMap *api.ConfigMap, w io.Writer, options PrintOptions) error { + name := configMap.Name + namespace := configMap.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + 
return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, len(configMap.Data), translateTimestamp(configMap.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(configMap.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, configMap.Labels)) + return err +} + +func printConfigMapList(list *api.ConfigMapList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printConfigMap(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func printPodSecurityPolicy(item *extensions.PodSecurityPolicy, w io.Writer, options PrintOptions) error { + _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%s\t%s\t%s\t%s\t%t\t%v\n", item.Name, item.Spec.Privileged, + item.Spec.AllowedCapabilities, item.Spec.SELinux.Rule, + item.Spec.RunAsUser.Rule, item.Spec.FSGroup.Rule, item.Spec.SupplementalGroups.Rule, item.Spec.ReadOnlyRootFilesystem, item.Spec.Volumes) + return err +} + +func printPodSecurityPolicyList(list *extensions.PodSecurityPolicyList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printPodSecurityPolicy(&item, w, options); err != nil { + return err + } + } + + return nil +} + +func printNetworkPolicy(networkPolicy *extensions.NetworkPolicy, w io.Writer, options PrintOptions) error { + name := networkPolicy.Name + namespace := networkPolicy.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, unversioned.FormatLabelSelector(&networkPolicy.Spec.PodSelector), translateTimestamp(networkPolicy.CreationTimestamp)); err != nil { + return err + } + if _, err := fmt.Fprint(w, AppendLabels(networkPolicy.Labels, options.ColumnLabels)); err != nil { + return err + } + _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, networkPolicy.Labels)) + return err +} + +func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, options PrintOptions) error { + for i := range list.Items { + if err := printNetworkPolicy(&list.Items[i], w, options); err != nil { + return err + } + } + return nil +} + +func AppendLabels(itemLabels map[string]string, columnLabels []string) string { + var buffer bytes.Buffer + + for _, cl := range columnLabels { + buffer.WriteString(fmt.Sprint("\t")) + if il, ok := itemLabels[cl]; ok { + buffer.WriteString(fmt.Sprint(il)) + } else { + buffer.WriteString("") + } + } + + return buffer.String() +} + +// Append all labels to a single column. We need this even when show-labels flag* is +// false, since this adds newline delimiter to the end of each row. +func AppendAllLabels(showLabels bool, itemLabels map[string]string) string { + var buffer bytes.Buffer + + if showLabels { + buffer.WriteString(fmt.Sprint("\t")) + buffer.WriteString(labels.FormatLabels(itemLabels)) + } + buffer.WriteString("\n") + + return buffer.String() +} + +// Append a set of tabs for each label column. We need this in the case where +// we have extra lines so that the tabwriter will still line things up. +func AppendLabelTabs(columnLabels []string) string { + var buffer bytes.Buffer + + for range columnLabels { + buffer.WriteString("\t") + } + buffer.WriteString("\n") + + return buffer.String() +} + +// Lay out all the containers on one line if use wide output. 
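AppendLabels and AppendAllLabels above are exported, so their row-suffix behaviour can be shown directly. A small, self-contained sketch; the label values are made up and the import path assumes the vendored package is used as-is:

	package main

	import (
		"fmt"

		"k8s.io/kubernetes/pkg/kubectl"
	)

	func main() {
		item := map[string]string{"app": "web", "tier": "frontend"}
		// One tab-prefixed cell per requested column label; labels that are not set become empty cells.
		fmt.Printf("%q\n", kubectl.AppendLabels(item, []string{"app", "env"})) // "\tweb\t"
		// Appends the formatted label set (e.g. app=web,tier=frontend) and the row-terminating newline.
		fmt.Printf("%q\n", kubectl.AppendAllLabels(true, item))
	}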
+func layoutContainers(containers []api.Container, w io.Writer) error { + var namesBuffer bytes.Buffer + var imagesBuffer bytes.Buffer + + for i, container := range containers { + namesBuffer.WriteString(container.Name) + imagesBuffer.WriteString(container.Image) + if i != len(containers)-1 { + namesBuffer.WriteString(",") + imagesBuffer.WriteString(",") + } + } + _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) + if err != nil { + return err + } + return nil +} + +func formatLabelHeaders(columnLabels []string) []string { + formHead := make([]string, len(columnLabels)) + for i, l := range columnLabels { + p := strings.Split(l, "/") + formHead[i] = strings.ToUpper((p[len(p)-1])) + } + return formHead +} + +// headers for -o wide +func formatWideHeaders(wide bool, t reflect.Type) []string { + if wide { + if t.String() == "*api.Pod" || t.String() == "*api.PodList" { + return []string{"IP", "NODE"} + } + if t.String() == "*api.ReplicationController" || t.String() == "*api.ReplicationControllerList" { + return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} + } + if t.String() == "*batch.Job" || t.String() == "*batch.JobList" { + return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} + } + if t.String() == "*api.Service" || t.String() == "*api.ServiceList" { + return []string{"SELECTOR"} + } + if t.String() == "*extensions.DaemonSet" || t.String() == "*extensions.DaemonSetList" { + return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} + } + if t.String() == "*extensions.ReplicaSet" || t.String() == "*extensions.ReplicaSetList" { + return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} + } + } + return nil +} + +// headers for --show-labels=true +func formatShowLabelsHeader(showLabels bool, t reflect.Type) []string { + if showLabels { + if t.String() != "*api.ThirdPartyResource" && t.String() != "*api.ThirdPartyResourceList" { + return []string{"LABELS"} + } + } + return nil +} + +// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. +func GetNewTabWriter(output io.Writer) *tabwriter.Writer { + return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) +} + +// PrintObj prints the obj in a human-friendly format according to the type of the obj. +func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { + // if output is a tabwriter (when it's called by kubectl get), we use it; create a new tabwriter otherwise + w, found := output.(*tabwriter.Writer) + if !found { + w = GetNewTabWriter(output) + defer w.Flush() + } + t := reflect.TypeOf(obj) + if handler := h.handlerMap[t]; handler != nil { + if !h.options.NoHeaders && t != h.lastType { + headers := append(handler.columns, formatWideHeaders(h.options.Wide, t)...) + headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) + // LABELS is always the last column. + headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) + if h.options.WithNamespace { + headers = append(withNamespacePrefixColumns, headers...) 
+ } + h.printHeader(headers, w) + h.lastType = t + } + args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(w), reflect.ValueOf(h.options)} + resultValue := handler.printFunc.Call(args)[0] + if resultValue.IsNil() { + return nil + } + return resultValue.Interface().(error) + } + return fmt.Errorf("error: unknown type %#v", obj) +} + +// TemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. +type TemplatePrinter struct { + rawTemplate string + template *template.Template +} + +func NewTemplatePrinter(tmpl []byte) (*TemplatePrinter, error) { + t, err := template.New("output"). + Funcs(template.FuncMap{"exists": exists}). + Parse(string(tmpl)) + if err != nil { + return nil, err + } + return &TemplatePrinter{ + rawTemplate: string(tmpl), + template: t, + }, nil +} + +// PrintObj formats the obj with the Go Template. +func (p *TemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + out := map[string]interface{}{} + if err := json.Unmarshal(data, &out); err != nil { + return err + } + if err = p.safeExecute(w, out); err != nil { + // It is way easier to debug this stuff when it shows up in + // stdout instead of just stdin. So in addition to returning + // a nice error, also print useful stuff with the writer. + fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) + fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) + fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) + fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) + return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) + } + return nil +} + +// TODO: implement HandledResources() +func (p *TemplatePrinter) HandledResources() []string { + return []string{} +} + +// safeExecute tries to execute the template, but catches panics and returns an error +// should the template engine panic. +func (p *TemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { + var panicErr error + // Sorry for the double anonymous function. There's probably a clever way + // to do this that has the defer'd func setting the value to be returned, but + // that would be even less obvious. + retErr := func() error { + defer func() { + if x := recover(); x != nil { + panicErr = fmt.Errorf("caught panic: %+v", x) + } + }() + return p.template.Execute(w, obj) + }() + if panicErr != nil { + return panicErr + } + return retErr +} + +func tabbedString(f func(io.Writer) error) (string, error) { + out := new(tabwriter.Writer) + buf := &bytes.Buffer{} + out.Init(buf, 0, 8, 1, '\t', 0) + + err := f(out) + if err != nil { + return "", err + } + + out.Flush() + str := string(buf.String()) + return str, nil +} + +// exists returns true if it would be possible to call the index function +// with these arguments. +// +// TODO: how to document this for users? +// +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. 
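For context, a sketch of how the exists helper registered by NewTemplatePrinter is meant to be used from a caller's side. The field path is illustrative and assumes the object's JSON form uses lowercase keys; obj stands for any runtime.Object obtained elsewhere:

	tmpl := []byte(`{{if exists . "metadata" "labels"}}{{.metadata.labels}}{{else}}<none>{{end}}`)
	printer, err := kubectl.NewTemplatePrinter(tmpl)
	if err != nil {
		return err
	}
	// PrintObj renders obj through its JSON representation, so missing fields are
	// guarded with exists instead of aborting template execution.
	if err := printer.PrintObj(obj, os.Stdout); err != nil {
		return err
	}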
+func exists(item interface{}, indices ...interface{}) bool { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return false + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return false + } + if x < 0 || x >= int64(v.Len()) { + return false + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return false + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return false + } + } + if _, isNil := indirect(v); isNil { + return false + } + return true +} + +// stolen from text/template +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. +type JSONPathPrinter struct { + rawTemplate string + *jsonpath.JSONPath +} + +func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { + j := jsonpath.New("out") + if err := j.Parse(tmpl); err != nil { + return nil, err + } + return &JSONPathPrinter{tmpl, j}, nil +} + +// PrintObj formats the obj with the JSONPath Template. +func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + var queryObj interface{} = obj + if meta.IsListType(obj) { + data, err := json.Marshal(obj) + if err != nil { + return err + } + queryObj = map[string]interface{}{} + if err := json.Unmarshal(data, &queryObj); err != nil { + return err + } + } + + if err := j.JSONPath.Execute(w, queryObj); err != nil { + fmt.Fprintf(w, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) + fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) + fmt.Fprintf(w, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) + return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, err) + } + return nil +} + +// TODO: implement HandledResources() +func (p *JSONPathPrinter) HandledResources() []string { + return []string{} +} + +func printSecurityContextConstraints(item *api.SecurityContextConstraints, w io.Writer, options PrintOptions) error { + priority := "" + if item.Priority != nil { + priority = fmt.Sprintf("%d", *item.Priority) + } + + _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%s\t%s\t%s\t%s\t%s\t%t\t%v\n", item.Name, item.AllowPrivilegedContainer, + item.AllowedCapabilities, item.SELinuxContext.Type, + item.RunAsUser.Type, item.FSGroup.Type, item.SupplementalGroups.Type, priority, item.ReadOnlyRootFilesystem, item.Volumes) + return err +} + +func printSecurityContextConstraintsList(list *api.SecurityContextConstraintsList, w io.Writer, options PrintOptions) error { + for _, item := range list.Items { + if err := printSecurityContextConstraints(&item, w, options); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go new file mode 100644 index 00000000..2e4f92b3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "os" + "os/signal" + "syscall" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/runtime" + deploymentutil "k8s.io/kubernetes/pkg/util/deployment" + "k8s.io/kubernetes/pkg/watch" +) + +// Rollbacker provides an interface for resources that can be rolled back. 
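A hedged sketch of the intended call pattern for the rollback path below; c is assumed to be a configured client.Interface, obj the deployment object fetched beforehand, and the namespace, name and revision are arbitrary examples:

	rollbacker, err := kubectl.RollbackerFor(extensions.Kind("Deployment"), c)
	if err != nil {
		return err
	}
	status, err := rollbacker.Rollback("default", "my-deployment", nil, 3, obj)
	if err != nil {
		return err
	}
	// status is the human-readable outcome, e.g. "rolled back" or a "skipped rollback (...)" message.
	fmt.Println(status)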
+type Rollbacker interface { + Rollback(namespace, name string, updatedAnnotations map[string]string, toRevision int64, obj runtime.Object) (string, error) +} + +func RollbackerFor(kind unversioned.GroupKind, c client.Interface) (Rollbacker, error) { + switch kind { + case extensions.Kind("Deployment"): + return &DeploymentRollbacker{c}, nil + } + return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) +} + +type DeploymentRollbacker struct { + c client.Interface +} + +func (r *DeploymentRollbacker) Rollback(namespace, name string, updatedAnnotations map[string]string, toRevision int64, obj runtime.Object) (string, error) { + d := obj.(*extensions.Deployment) + if d.Spec.Paused { + return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again") + } + deploymentRollback := &extensions.DeploymentRollback{ + Name: name, + UpdatedAnnotations: updatedAnnotations, + RollbackTo: extensions.RollbackConfig{ + Revision: toRevision, + }, + } + result := "" + + // Get current events + events, err := r.c.Events(namespace).List(api.ListOptions{}) + if err != nil { + return result, err + } + // Do the rollback + if err := r.c.Extensions().Deployments(namespace).Rollback(deploymentRollback); err != nil { + return result, err + } + // Watch for the changes of events + watch, err := r.c.Events(namespace).Watch(api.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) + if err != nil { + return result, err + } + result = watchRollbackEvent(watch) + return result, err +} + +// watchRollbackEvent watches for rollback events and returns rollback result +func watchRollbackEvent(w watch.Interface) string { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM) + for { + select { + case event, ok := <-w.ResultChan(): + if !ok { + return "" + } + obj, ok := event.Object.(*api.Event) + if !ok { + w.Stop() + return "" + } + isRollback, result := isRollbackEvent(obj) + if isRollback { + w.Stop() + return result + } + case <-signals: + w.Stop() + } + } +} + +// isRollbackEvent checks if the input event is about rollback, and returns true and +// related result string back if it is. +func isRollbackEvent(e *api.Event) (bool, string) { + rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone} + for _, reason := range rollbackEventReasons { + if e.Reason == reason { + if reason == deploymentutil.RollbackDone { + return true, "rolled back" + } + return true, fmt.Sprintf("skipped rollback (%s: %s)", e.Reason, e.Message) + } + } + return false, "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go new file mode 100644 index 00000000..0af1253a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -0,0 +1,810 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + goerrors "errors" + "fmt" + "io" + "strconv" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/deployment" + "k8s.io/kubernetes/pkg/util/integer" + "k8s.io/kubernetes/pkg/util/intstr" + "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" + desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" + originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" + nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" +) + +// RollingUpdaterConfig is the configuration for a rolling deployment process. +type RollingUpdaterConfig struct { + // Out is a writer for progress output. + Out io.Writer + // OldRC is an existing controller to be replaced. + OldRc *api.ReplicationController + // NewRc is a controller that will take ownership of updated pods (will be + // created if needed). + NewRc *api.ReplicationController + // UpdatePeriod is the time to wait between individual pod updates. + UpdatePeriod time.Duration + // Interval is the time to wait between polling controller status after + // update. + Interval time.Duration + // Timeout is the time to wait for controller updates before giving up. + Timeout time.Duration + // CleanupPolicy defines the cleanup action to take after the deployment is + // complete. + CleanupPolicy RollingUpdaterCleanupPolicy + // MaxUnavailable is the maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + MaxUnavailable intstr.IntOrString + // MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately + // when the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, new RC can be scaled up + // further, ensuring that total number of pods running at any time during + // the update is atmost 130% of desired pods. + MaxSurge intstr.IntOrString + // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or + // abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value + // is a synthetic "progress" calculation that represents the approximate percentage completion. 
+ OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error +} + +// RollingUpdaterCleanupPolicy is a cleanup action to take after the +// deployment is complete. +type RollingUpdaterCleanupPolicy string + +const ( + // DeleteRollingUpdateCleanupPolicy means delete the old controller. + DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" + // PreserveRollingUpdateCleanupPolicy means keep the old controller. + PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" + // RenameRollingUpdateCleanupPolicy means delete the old controller, and rename + // the new controller to the name of the old controller. + RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename" +) + +// RollingUpdater provides methods for updating replicated pods in a predictable, +// fault-tolerant way. +type RollingUpdater struct { + // Client interface for creating and updating controllers + c client.Interface + // Namespace for resources + ns string + // scaleAndWait scales a controller and returns its updated state. + scaleAndWait func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) + //getOrCreateTargetController gets and validates an existing controller or + //makes a new one. + getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) + // cleanup performs post deployment cleanup tasks for newRc and oldRc. + cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error + // getReadyPods returns the amount of old and new ready pods. + getReadyPods func(oldRc, newRc *api.ReplicationController) (int32, int32, error) +} + +// NewRollingUpdater creates a RollingUpdater from a client. +func NewRollingUpdater(namespace string, client client.Interface) *RollingUpdater { + updater := &RollingUpdater{ + c: client, + ns: namespace, + } + // Inject real implementations. + updater.scaleAndWait = updater.scaleAndWaitWithScaler + updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient + updater.getReadyPods = updater.readyPods + updater.cleanup = updater.cleanupWithClients + return updater +} + +// Update all pods for a ReplicationController (oldRc) by creating a new +// controller (newRc) with 0 replicas, and synchronously scaling oldRc and +// newRc until oldRc has 0 replicas and newRc has the original # of desired +// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. +// +// Each interval, the updater will attempt to make progress however it can +// without violating any availability constraints defined by the config. This +// means the amount scaled up or down each interval will vary based on the +// timeliness of readiness and the updater will always try to make progress, +// even slowly. +// +// If an update from newRc to oldRc is already in progress, we attempt to +// drive it to completion. If an error occurs at any step of the update, the +// error will be returned. +// +// A scaling event (either up or down) is considered progress; if no progress +// is made within the config.Timeout, an error is returned. +// +// TODO: make this handle performing a rollback of a partially completed +// rollout. 
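A sketch of how a caller typically drives Update; oldRc and newRc are assumed to have been prepared elsewhere (for example with CreateNewControllerFromCurrentController defined later in this file), c is a client.Interface, and the timings and percentages are illustrative:

	updater := kubectl.NewRollingUpdater("default", c)
	err := updater.Update(&kubectl.RollingUpdaterConfig{
		Out:            os.Stdout,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   3 * time.Second,
		Interval:       3 * time.Second,
		Timeout:        5 * time.Minute,
		CleanupPolicy:  kubectl.DeleteRollingUpdateCleanupPolicy,
		MaxUnavailable: intstr.FromString("25%"),
		MaxSurge:       intstr.FromString("25%"),
	})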
+func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { + out := config.Out + oldRc := config.OldRc + scaleRetryParams := NewRetryParams(config.Interval, config.Timeout) + + // Find an existing controller (for continuing an interrupted update) or + // create a new one if necessary. + sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) + newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId) + if err != nil { + return err + } + if existed { + fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name) + } else { + fmt.Fprintf(out, "Created %s\n", newRc.Name) + } + // Extract the desired replica count from the controller. + desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) + if err != nil { + return fmt.Errorf("Unable to parse annotation for %s: %s=%s", + newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) + } + desired := int32(desiredAnnotation) + // Extract the original replica count from the old controller, adding the + // annotation if it doesn't yet exist. + _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] + if !hasOriginalAnnotation { + existing, err := r.c.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name) + if err != nil { + return err + } + originReplicas := strconv.Itoa(int(existing.Spec.Replicas)) + applyUpdate := func(rc *api.ReplicationController) { + if rc.Annotations == nil { + rc.Annotations = map[string]string{} + } + rc.Annotations[originalReplicasAnnotation] = originReplicas + } + if oldRc, err = updateRcWithRetries(r.c, existing.Namespace, existing, applyUpdate); err != nil { + return err + } + } + // maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods + // that can be unavailable during a rollout. + maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) + if err != nil { + return err + } + // Validate maximums. + if desired > 0 && maxUnavailable == 0 && maxSurge == 0 { + return fmt.Errorf("one of maxSurge or maxUnavailable must be specified") + } + // The minumum pods which must remain available througout the update + // calculated for internal convenience. + minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) + // If the desired new scale is 0, then the max unavailable is necessarily + // the effective scale of the old RC regardless of the configuration + // (equivalent to 100% maxUnavailable). + if desired == 0 { + maxUnavailable = oldRc.Spec.Replicas + minAvailable = 0 + } + + fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", + newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge) + + // give a caller incremental notification and allow them to exit early + goal := desired - newRc.Spec.Replicas + if goal < 0 { + goal = -goal + } + progress := func(complete bool) error { + if config.OnProgress == nil { + return nil + } + progress := desired - newRc.Spec.Replicas + if progress < 0 { + progress = -progress + } + percentage := 100 + if !complete && goal > 0 { + percentage = int((goal - progress) * 100 / goal) + } + return config.OnProgress(oldRc, newRc, percentage) + } + + // Scale newRc and oldRc until newRc has the desired number of replicas and + // oldRc has 0 replicas. 
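+ // For example: with desired=10, MaxSurge=30% (3 pods) and MaxUnavailable=20% (2 pods), the + // loop below keeps at least 8 pods available and never runs more than 13 pods in total.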
+ progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() + for newRc.Spec.Replicas != desired || oldRc.Spec.Replicas != 0 { + // Store the existing replica counts for progress timeout tracking. + newReplicas := newRc.Spec.Replicas + oldReplicas := oldRc.Spec.Replicas + + // Scale up as much as possible. + scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config) + if err != nil { + return err + } + newRc = scaledRc + + // notify the caller if necessary + if err := progress(false); err != nil { + return err + } + + // Wait between scaling operations for things to settle. + time.Sleep(config.UpdatePeriod) + + // Scale down as much as possible. + scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config) + if err != nil { + return err + } + oldRc = scaledRc + + // notify the caller if necessary + if err := progress(false); err != nil { + return err + } + + // If we are making progress, continue to advance the progress deadline. + // Otherwise, time out with an error. + progressMade := (newRc.Spec.Replicas != newReplicas) || (oldRc.Spec.Replicas != oldReplicas) + if progressMade { + progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds() + } else if time.Now().UnixNano() > progressDeadline { + return fmt.Errorf("timed out waiting for any update progress to be made") + } + } + + // notify the caller if necessary + if err := progress(true); err != nil { + return err + } + + // Housekeeping and cleanup policy execution. + return r.cleanup(oldRc, newRc, config) +} + +// scaleUp scales up newRc to desired by whatever increment is possible given +// the configured surge threshold. scaleUp will safely no-op as necessary when +// it detects redundancy or other relevant conditions. +func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { + // If we're already at the desired, do nothing. + if newRc.Spec.Replicas == desired { + return newRc, nil + } + + // Scale up as far as we can based on the surge limit. + increment := (desired + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas) + // If the old is already scaled down, go ahead and scale all the way up. + if oldRc.Spec.Replicas == 0 { + increment = desired - newRc.Spec.Replicas + } + // We can't scale up without violating the surge limit, so do nothing. + if increment <= 0 { + return newRc, nil + } + // Increase the replica count, and deal with fenceposts. + newRc.Spec.Replicas += increment + if newRc.Spec.Replicas > desired { + newRc.Spec.Replicas = desired + } + // Perform the scale-up. + fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, newRc.Spec.Replicas) + scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams) + if err != nil { + return nil, err + } + return scaledRc, nil +} + +// scaleDown scales down oldRc to 0 at whatever decrement possible given the +// thresholds defined on the config. scaleDown will safely no-op as necessary +// when it detects redundancy or other relevant conditions. +func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) { + // Already scaled down; do nothing. + if oldRc.Spec.Replicas == 0 { + return oldRc, nil + } + // Get ready pods. 
We shouldn't block; otherwise, if both old and new + // pods are unavailable, the rolling update process blocks. + // Timeout-wise we are already covered by the progress check. + _, newAvailable, err := r.getReadyPods(oldRc, newRc) + if err != nil { + return nil, err + } + // The old controller is considered as part of the total because we want to + // maintain minimum availability even with a volatile old controller. + // Scale down as much as possible while maintaining minimum availability. + allPods := oldRc.Spec.Replicas + newRc.Spec.Replicas + newUnavailable := newRc.Spec.Replicas - newAvailable + decrement := allPods - minAvailable - newUnavailable + // The decrement normally shouldn't drop below 0 because the available count + // always starts below the old replica count, but the old replica count can + // decrement due to externalities like pod deaths in the replica set. This + // will be considered a transient condition; do nothing and try again later + // with new readiness values. + // + // If the most we can scale is 0, it means we can't scale down without + // violating the minimum. Do nothing and try again later when conditions may + // have changed. + if decrement <= 0 { + return oldRc, nil + } + // Reduce the replica count, and deal with fenceposts. + oldRc.Spec.Replicas -= decrement + if oldRc.Spec.Replicas < 0 { + oldRc.Spec.Replicas = 0 + } + // If the new is already fully scaled and available up to the desired size, go + // ahead and scale old all the way down. + if newRc.Spec.Replicas == desired && newAvailable == desired { + oldRc.Spec.Replicas = 0 + } + // Perform the scale-down. + fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, oldRc.Spec.Replicas) + retryWait := &RetryParams{config.Interval, config.Timeout} + scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait) + if err != nil { + return nil, err + } + return scaledRc, nil +} + +// scaleAndWaitWithScaler scales a controller using a Scaler and a real client. +func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { + scaler, err := ScalerFor(api.Kind("ReplicationController"), r.c) + if err != nil { + return nil, fmt.Errorf("Couldn't make scaler: %s", err) + } + if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil { + return nil, err + } + return r.c.ReplicationControllers(rc.Namespace).Get(rc.Name) +} + +// readyPods returns the old and new ready counts for their pods. +// If a pod is observed as being ready, it's considered ready even +// if it later becomes notReady. +func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) { + controllers := []*api.ReplicationController{oldRc, newRc} + oldReady := int32(0) + newReady := int32(0) + + for i := range controllers { + controller := controllers[i] + selector := labels.Set(controller.Spec.Selector).AsSelector() + options := api.ListOptions{LabelSelector: selector} + pods, err := r.c.Pods(controller.Namespace).List(options) + if err != nil { + return 0, 0, err + } + for _, pod := range pods.Items { + if api.IsPodReady(&pod) { + switch controller.Name { + case oldRc.Name: + oldReady++ + case newRc.Name: + newReady++ + } + } + } + } + return oldReady, newReady, nil +} + +// getOrCreateTargetControllerWithClient looks for an existing controller with +// sourceId.
If found, the existing controller is returned with true +// indicating that the controller already exists. If the controller isn't +// found, a new one is created and returned along with false indicating the +// controller was created. +// +// Existing controllers are validated to ensure their sourceIdAnnotation +// matches sourceId; if there's a mismatch, an error is returned. +func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { + existingRc, err := r.existingController(controller) + if err != nil { + if !errors.IsNotFound(err) { + // There was an error trying to find the controller; don't assume we + // should create it. + return nil, false, err + } + if controller.Spec.Replicas <= 0 { + return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas) + } + // The controller wasn't found, so create it. + if controller.Annotations == nil { + controller.Annotations = map[string]string{} + } + controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas) + controller.Annotations[sourceIdAnnotation] = sourceId + controller.Spec.Replicas = 0 + newRc, err := r.c.ReplicationControllers(r.ns).Create(controller) + return newRc, false, err + } + // Validate and use the existing controller. + annotations := existingRc.Annotations + source := annotations[sourceIdAnnotation] + _, ok := annotations[desiredReplicasAnnotation] + if source != sourceId || !ok { + return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) + } + return existingRc, true, nil +} + +// existingController verifies if the controller already exists +func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) { + // without rc name but generate name, there's no existing rc + if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { + return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name) + } + // controller name is required to get rc back + return r.c.ReplicationControllers(controller.Namespace).Get(controller.Name) +} + +// cleanupWithClients performs cleanup tasks after the rolling update. Update +// process related annotations are removed from oldRc and newRc. The +// CleanupPolicy on config is executed. +func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { + // Clean up annotations + var err error + newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name) + if err != nil { + return err + } + applyUpdate := func(rc *api.ReplicationController) { + delete(rc.Annotations, sourceIdAnnotation) + delete(rc.Annotations, desiredReplicasAnnotation) + } + if newRc, err = updateRcWithRetries(r.c, r.ns, newRc, applyUpdate); err != nil { + return err + } + + if err = wait.Poll(config.Interval, config.Timeout, client.ControllerHasDesiredReplicas(r.c, newRc)); err != nil { + return err + } + newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name) + if err != nil { + return err + } + + switch config.CleanupPolicy { + case DeleteRollingUpdateCleanupPolicy: + // delete old rc + fmt.Fprintf(config.Out, "Update succeeded. 
Deleting %s\n", oldRc.Name) + return r.c.ReplicationControllers(r.ns).Delete(oldRc.Name) + case RenameRollingUpdateCleanupPolicy: + // delete old rc + fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name) + if err := r.c.ReplicationControllers(r.ns).Delete(oldRc.Name); err != nil { + return err + } + fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name) + return Rename(r.c, newRc, oldRc.Name) + case PreserveRollingUpdateCleanupPolicy: + return nil + default: + return nil + } +} + +func Rename(c client.ReplicationControllersNamespacer, rc *api.ReplicationController, newName string) error { + oldName := rc.Name + rc.Name = newName + rc.ResourceVersion = "" + + _, err := c.ReplicationControllers(rc.Namespace).Create(rc) + if err != nil { + return err + } + err = c.ReplicationControllers(rc.Namespace).Delete(oldName) + if err != nil && !errors.IsNotFound(err) { + return err + } + return nil +} + +func LoadExistingNextReplicationController(c client.ReplicationControllersNamespacer, namespace, newName string) (*api.ReplicationController, error) { + if len(newName) == 0 { + return nil, nil + } + newRc, err := c.ReplicationControllers(namespace).Get(newName) + if err != nil && errors.IsNotFound(err) { + return nil, nil + } + return newRc, err +} + +type NewControllerConfig struct { + Namespace string + OldName, NewName string + Image string + Container string + DeploymentKey string + PullPolicy api.PullPolicy +} + +func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) { + containerIndex := 0 + // load the old RC into the "new" RC + newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName) + if err != nil { + return nil, err + } + + if len(cfg.Container) != 0 { + containerFound := false + + for i, c := range newRc.Spec.Template.Spec.Containers { + if c.Name == cfg.Container { + containerIndex = i + containerFound = true + break + } + } + + if !containerFound { + return nil, fmt.Errorf("container %s not found in pod", cfg.Container) + } + } + + if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { + return nil, goerrors.New("Must specify container to update when updating a multi-container pod") + } + + if len(newRc.Spec.Template.Spec.Containers) == 0 { + return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc)) + } + newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image + if len(cfg.PullPolicy) != 0 { + newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy + } + + newHash, err := api.HashObject(newRc, codec) + if err != nil { + return nil, err + } + + if len(cfg.NewName) == 0 { + cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) + } + newRc.Name = cfg.NewName + + newRc.Spec.Selector[cfg.DeploymentKey] = newHash + newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash + // Clear resource version after hashing so that identical updates get different hashes. + newRc.ResourceVersion = "" + return newRc, nil +} + +func AbortRollingUpdate(c *RollingUpdaterConfig) error { + // Swap the controllers + tmp := c.OldRc + c.OldRc = c.NewRc + c.NewRc = tmp + + if c.NewRc.Annotations == nil { + c.NewRc.Annotations = map[string]string{} + } + c.NewRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) + + // Use the original value since the replica count change from old to new + // could be asymmetric. 
If we don't know the original count, we can't safely + // roll back to a known good size. + originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation] + if !foundOriginal { + return fmt.Errorf("couldn't find original replica count of %q", tmp.Name) + } + fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize) + c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize + c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy + return nil +} + +func GetNextControllerAnnotation(rc *api.ReplicationController) (string, bool) { + res, found := rc.Annotations[nextControllerAnnotation] + return res, found +} + +func SetNextControllerAnnotation(rc *api.ReplicationController, name string) { + if rc.Annotations == nil { + rc.Annotations = map[string]string{} + } + rc.Annotations[nextControllerAnnotation] = name +} + +func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) { + if _, found := oldRc.Spec.Selector[deploymentKey]; !found { + SetNextControllerAnnotation(oldRc, newName) + return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out) + } else { + // If we didn't need to update the controller for the deployment key, we still need to write + // the "next" controller. + applyUpdate := func(rc *api.ReplicationController) { + SetNextControllerAnnotation(rc, newName) + } + return updateRcWithRetries(c, namespace, oldRc, applyUpdate) + } +} + +func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) { + var err error + // First, update the template label. This ensures that any newly created pods will have the new label + applyUpdate := func(rc *api.ReplicationController) { + if rc.Spec.Template.Labels == nil { + rc.Spec.Template.Labels = map[string]string{} + } + rc.Spec.Template.Labels[deploymentKey] = deploymentValue + } + if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil { + return nil, err + } + + // Update all pods managed by the rc to have the new hash label, so they are correctly adopted + // TODO: extract the code from the label command and re-use it here. 
+ selector := labels.SelectorFromSet(oldRc.Spec.Selector) + options := api.ListOptions{LabelSelector: selector} + podList, err := client.Pods(namespace).List(options) + if err != nil { + return nil, err + } + for ix := range podList.Items { + pod := &podList.Items[ix] + applyUpdate := func(p *api.Pod) { + if p.Labels == nil { + p.Labels = map[string]string{ + deploymentKey: deploymentValue, + } + } else { + p.Labels[deploymentKey] = deploymentValue + } + } + if pod, err = updatePodWithRetries(client, namespace, pod, applyUpdate); err != nil { + return nil, err + } + } + + if oldRc.Spec.Selector == nil { + oldRc.Spec.Selector = map[string]string{} + } + // Copy the old selector, so that we can scrub out any orphaned pods + selectorCopy := map[string]string{} + for k, v := range oldRc.Spec.Selector { + selectorCopy[k] = v + } + applyUpdate = func(rc *api.ReplicationController) { + rc.Spec.Selector[deploymentKey] = deploymentValue + } + // Update the selector of the rc so it manages all the pods we updated above + if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil { + return nil, err + } + + // Clean up any orphaned pods that don't have the new label, this can happen if the rc manager + // doesn't see the update to its pod template and creates a new pod with the old labels after + // we've finished re-adopting existing pods to the rc. + selector = labels.SelectorFromSet(selectorCopy) + options = api.ListOptions{LabelSelector: selector} + podList, err = client.Pods(namespace).List(options) + for ix := range podList.Items { + pod := &podList.Items[ix] + if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { + if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil { + return nil, err + } + } + } + + return oldRc, nil +} + +type updateRcFunc func(controller *api.ReplicationController) + +// updateRcWithRetries retries updating the given rc on conflict with the following steps: +// 1. Get latest resource +// 2. applyUpdate +// 3. Update the resource +func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) { + // Deep copy the rc in case we failed on Get during retry loop + obj, err := api.Scheme.Copy(rc) + if err != nil { + return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err) + } + oldRc := obj.(*api.ReplicationController) + err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) { + // Apply the update, then attempt to push it to the apiserver. + applyUpdate(rc) + if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil { + // rc contains the latest controller post update + return + } + updateErr := e + // Update the controller with the latest resource version, if the update failed we + // can't trust rc so use oldRc.Name. + if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil { + // The Get failed: Value in rc cannot be trusted. + rc = oldRc + } + // Only return the error from update + return updateErr + }) + // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned + // controller contains the applied update. + return rc, err +} + +type updatePodFunc func(controller *api.Pod) + +// updatePodWithRetries retries updating the given pod on conflict with the following steps: +// 1. Get latest resource +// 2. applyUpdate +// 3. 
Update the resource +func updatePodWithRetries(c client.Interface, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) { + // Deep copy the pod in case we failed on Get during retry loop + obj, err := api.Scheme.Copy(pod) + if err != nil { + return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err) + } + oldPod := obj.(*api.Pod) + err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) { + // Apply the update, then attempt to push it to the apiserver. + applyUpdate(pod) + if pod, e = c.Pods(namespace).Update(pod); e == nil { + return + } + updateErr := e + if pod, e = c.Pods(namespace).Get(oldPod.Name); e != nil { + pod = oldPod + } + // Only return the error from update + return updateErr + }) + // If the error is non-nil, the returned pod cannot be trusted; if it is nil, the returned + // pod contains the applied update. + return pod, err +} + +func FindSourceController(r client.ReplicationControllersNamespacer, namespace, name string) (*api.ReplicationController, error) { + list, err := r.ReplicationControllers(namespace).List(api.ListOptions{}) + if err != nil { + return nil, err + } + for ix := range list.Items { + rc := &list.Items[ix] + if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) { + return rc, nil + } + } + return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go new file mode 100644 index 00000000..dc39865d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" +) + +// StatusViewer provides an interface for resources that provide rollout status.
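A sketch of the intended polling pattern for the status viewer below; c is assumed to be a configured client.Interface and the namespace and name are arbitrary examples:

	viewer, err := kubectl.StatusViewerFor(extensions.Kind("Deployment"), c)
	if err != nil {
		return err
	}
	msg, done, err := viewer.Status("default", "my-deployment")
	if err != nil {
		return err
	}
	fmt.Print(msg)
	if !done {
		// Not rolled out yet; callers typically poll or watch and call Status again.
	}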
+type StatusViewer interface { + Status(namespace, name string) (string, bool, error) +} + +func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) { + switch kind { + case extensions.Kind("Deployment"): + return &DeploymentStatusViewer{c.Extensions()}, nil + } + return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) +} + +type DeploymentStatusViewer struct { + c client.ExtensionsInterface +} + +// Status returns a message describing deployment status, and a bool value indicating if the status is considered done +func (s *DeploymentStatusViewer) Status(namespace, name string) (string, bool, error) { + deployment, err := s.c.Deployments(namespace).Get(name) + if err != nil { + return "", false, err + } + if deployment.Generation <= deployment.Status.ObservedGeneration { + if deployment.Status.UpdatedReplicas == deployment.Spec.Replicas { + return fmt.Sprintf("deployment %s successfully rolled out\n", name), true, nil + } + return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil + } + return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go new file mode 100644 index 00000000..4e076c07 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go @@ -0,0 +1,869 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/batch" + batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation" +) + +type DeploymentV1Beta1 struct{} + +func (DeploymentV1Beta1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"labels", false}, + {"default-name", false}, + {"name", true}, + {"replicas", true}, + {"image", true}, + {"port", false}, + {"hostport", false}, + {"stdin", false}, + {"tty", false}, + {"command", false}, + {"args", false}, + {"env", false}, + {"requests", false}, + {"limits", false}, + } +} + +func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getEnvs(genericParams) + if err != nil { + return nil, err + } + + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, true, name) + if err != nil { + return nil, err + } + + count, err := strconv.Atoi(params["replicas"]) + if err != nil { + return nil, err + } + + podSpec, err := makePodSpec(params, name) + if err != nil { + return nil, err + } + + if err = updatePodContainers(params, args, envs, podSpec); err != nil { + return nil, err + } + + if err := updatePodPorts(params, podSpec); err != nil { + return nil, err + } + + // TODO: use versioned types for generators so that we don't need to + // set default values manually (see issue #17384) + deployment := extensions.Deployment{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: extensions.DeploymentSpec{ + Replicas: int32(count), + Selector: &unversioned.LabelSelector{MatchLabels: labels}, + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: labels, + }, + Spec: *podSpec, + }, + }, + } + return &deployment, nil +} + +func getLabels(params map[string]string, defaultRunLabel bool, name string) (map[string]string, error) { + labelString, found := params["labels"] + var labels map[string]string + var err error + if found && len(labelString) > 0 { + labels, err = ParseLabels(labelString) + if err != nil { + return nil, err + } + } else if defaultRunLabel { + labels = map[string]string{ + "run": name, + } + } + return labels, nil +} + +func getName(params map[string]string) (string, error) { + name, found := params["name"] + if !found || len(name) == 0 { + name, found = params["default-name"] + if !found || len(name) == 0 { + return "", fmt.Errorf("'name' is a required parameter.") + } + } + return name, nil +} + +func getParams(genericParams map[string]interface{}) (map[string]string, error) { + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + return params, nil +} + +func getArgs(genericParams map[string]interface{}) ([]string, error) { + args := []string{} + val, found := genericParams["args"] + if found { + var isArray bool + args, isArray = val.([]string) + if !isArray { + return nil, fmt.Errorf("expected []string, found: %v", val) + } + delete(genericParams, 
"args") + } + return args, nil +} + +func getEnvs(genericParams map[string]interface{}) ([]api.EnvVar, error) { + var envs []api.EnvVar + envStrings, found := genericParams["env"] + if found { + if envStringArray, isArray := envStrings.([]string); isArray { + var err error + envs, err = parseEnvs(envStringArray) + if err != nil { + return nil, err + } + delete(genericParams, "env") + } else { + return nil, fmt.Errorf("expected []string, found: %v", envStrings) + } + } + return envs, nil +} + +func getV1Envs(genericParams map[string]interface{}) ([]v1.EnvVar, error) { + var envs []v1.EnvVar + envStrings, found := genericParams["env"] + if found { + if envStringArray, isArray := envStrings.([]string); isArray { + var err error + envs, err = parseV1Envs(envStringArray) + if err != nil { + return nil, err + } + delete(genericParams, "env") + } else { + return nil, fmt.Errorf("expected []string, found: %v", envStrings) + } + } + return envs, nil +} + +type JobV1Beta1 struct{} + +func (JobV1Beta1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"labels", false}, + {"default-name", false}, + {"name", true}, + {"image", true}, + {"port", false}, + {"hostport", false}, + {"stdin", false}, + {"leave-stdin-open", false}, + {"tty", false}, + {"command", false}, + {"args", false}, + {"env", false}, + {"requests", false}, + {"limits", false}, + {"restart", false}, + } +} + +func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getEnvs(genericParams) + if err != nil { + return nil, err + } + + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, true, name) + if err != nil { + return nil, err + } + + podSpec, err := makePodSpec(params, name) + if err != nil { + return nil, err + } + + if err = updatePodContainers(params, args, envs, podSpec); err != nil { + return nil, err + } + + leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + if err != nil { + return nil, err + } + podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin + + if err := updatePodPorts(params, podSpec); err != nil { + return nil, err + } + + restartPolicy := api.RestartPolicy(params["restart"]) + if len(restartPolicy) == 0 { + restartPolicy = api.RestartPolicyNever + } + podSpec.RestartPolicy = restartPolicy + + job := batch.Job{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: batch.JobSpec{ + Selector: &unversioned.LabelSelector{ + MatchLabels: labels, + }, + ManualSelector: newBool(true), + Template: api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: labels, + }, + Spec: *podSpec, + }, + }, + } + + return &job, nil +} + +type JobV1 struct{} + +func (JobV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"labels", false}, + {"default-name", false}, + {"name", true}, + {"image", true}, + {"port", false}, + {"hostport", false}, + {"stdin", false}, + {"leave-stdin-open", false}, + {"tty", false}, + {"command", false}, + {"args", false}, + {"env", false}, + {"requests", false}, + {"limits", false}, + {"restart", false}, + } +} + +func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getV1Envs(genericParams) + if err != nil { + return nil, err + } 
+ + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, true, name) + if err != nil { + return nil, err + } + + podSpec, err := makeV1PodSpec(params, name) + if err != nil { + return nil, err + } + + if err = updateV1PodContainers(params, args, envs, podSpec); err != nil { + return nil, err + } + + leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + if err != nil { + return nil, err + } + podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin + + if err := updateV1PodPorts(params, podSpec); err != nil { + return nil, err + } + + restartPolicy := v1.RestartPolicy(params["restart"]) + if len(restartPolicy) == 0 { + restartPolicy = v1.RestartPolicyNever + } + podSpec.RestartPolicy = restartPolicy + + job := batchv1.Job{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Labels: labels, + }, + Spec: *podSpec, + }, + }, + } + + return &job, nil +} + +type BasicReplicationController struct{} + +func (BasicReplicationController) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"labels", false}, + {"default-name", false}, + {"name", true}, + {"replicas", true}, + {"image", true}, + {"port", false}, + {"hostport", false}, + {"stdin", false}, + {"tty", false}, + {"command", false}, + {"args", false}, + {"env", false}, + {"requests", false}, + {"limits", false}, + } +} + +// populateResourceList takes strings of form =,= +func populateResourceList(spec string) (api.ResourceList, error) { + // empty input gets a nil response to preserve generator test expected behaviors + if spec == "" { + return nil, nil + } + + result := api.ResourceList{} + resourceStatements := strings.Split(spec, ",") + for _, resourceStatement := range resourceStatements { + parts := strings.Split(resourceStatement, "=") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) + } + resourceName := api.ResourceName(parts[0]) + resourceQuantity, err := resource.ParseQuantity(parts[1]) + if err != nil { + return nil, err + } + result[resourceName] = resourceQuantity + } + return result, nil +} + +// populateResourceList takes strings of form =,= +func populateV1ResourceList(spec string) (v1.ResourceList, error) { + // empty input gets a nil response to preserve generator test expected behaviors + if spec == "" { + return nil, nil + } + + result := v1.ResourceList{} + resourceStatements := strings.Split(spec, ",") + for _, resourceStatement := range resourceStatements { + parts := strings.Split(resourceStatement, "=") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) + } + resourceName := v1.ResourceName(parts[0]) + resourceQuantity, err := resource.ParseQuantity(parts[1]) + if err != nil { + return nil, err + } + result[resourceName] = resourceQuantity + } + return result, nil +} + +// HandleResourceRequirements parses the limits and requests parameters if specified +func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) { + result := api.ResourceRequirements{} + limits, err := populateResourceList(params["limits"]) + if err != nil { + return result, err + } + result.Limits = limits + requests, err := populateResourceList(params["requests"]) + if err != nil { + return result, err + } + 
result.Requests = requests + return result, nil +} + +// HandleResourceRequirements parses the limits and requests parameters if specified +func handleV1ResourceRequirements(params map[string]string) (v1.ResourceRequirements, error) { + result := v1.ResourceRequirements{} + limits, err := populateV1ResourceList(params["limits"]) + if err != nil { + return result, err + } + result.Limits = limits + requests, err := populateV1ResourceList(params["requests"]) + if err != nil { + return result, err + } + result.Requests = requests + return result, nil +} + +func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) { + stdin, err := GetBool(params, "stdin", false) + if err != nil { + return nil, err + } + + tty, err := GetBool(params, "tty", false) + if err != nil { + return nil, err + } + + resourceRequirements, err := HandleResourceRequirements(params) + if err != nil { + return nil, err + } + + spec := api.PodSpec{ + Containers: []api.Container{ + { + Name: name, + Image: params["image"], + Stdin: stdin, + TTY: tty, + Resources: resourceRequirements, + }, + }, + } + return &spec, nil +} + +func makeV1PodSpec(params map[string]string, name string) (*v1.PodSpec, error) { + stdin, err := GetBool(params, "stdin", false) + if err != nil { + return nil, err + } + + tty, err := GetBool(params, "tty", false) + if err != nil { + return nil, err + } + + resourceRequirements, err := handleV1ResourceRequirements(params) + if err != nil { + return nil, err + } + + spec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: name, + Image: params["image"], + Stdin: stdin, + TTY: tty, + Resources: resourceRequirements, + }, + }, + } + return &spec, nil +} + +func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getEnvs(genericParams) + if err != nil { + return nil, err + } + + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, true, name) + if err != nil { + return nil, err + } + + count, err := strconv.Atoi(params["replicas"]) + if err != nil { + return nil, err + } + + podSpec, err := makePodSpec(params, name) + if err != nil { + return nil, err + } + + if err = updatePodContainers(params, args, envs, podSpec); err != nil { + return nil, err + } + + if err := updatePodPorts(params, podSpec); err != nil { + return nil, err + } + + controller := api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: int32(count), + Selector: labels, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: labels, + }, + Spec: *podSpec, + }, + }, + } + return &controller, nil +} + +func updatePodContainers(params map[string]string, args []string, envs []api.EnvVar, podSpec *api.PodSpec) error { + if len(args) > 0 { + command, err := GetBool(params, "command", false) + if err != nil { + return err + } + if command { + podSpec.Containers[0].Command = args + } else { + podSpec.Containers[0].Args = args + } + } + + if len(envs) > 0 { + podSpec.Containers[0].Env = envs + } + return nil +} + +func updateV1PodContainers(params map[string]string, args []string, envs []v1.EnvVar, podSpec *v1.PodSpec) error { + if len(args) > 0 { + command, err := GetBool(params, "command", false) + if err != nil { + return err + } + if 
command { + podSpec.Containers[0].Command = args + } else { + podSpec.Containers[0].Args = args + } + } + + if len(envs) > 0 { + podSpec.Containers[0].Env = envs + } + return nil +} + +func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) { + port := -1 + hostPort := -1 + if len(params["port"]) > 0 { + port, err = strconv.Atoi(params["port"]) + if err != nil { + return err + } + } + + if len(params["hostport"]) > 0 { + hostPort, err = strconv.Atoi(params["hostport"]) + if err != nil { + return err + } + if hostPort > 0 && port < 0 { + return fmt.Errorf("--hostport requires --port to be specified") + } + } + + // Don't include the port if it was not specified. + if port > 0 { + podSpec.Containers[0].Ports = []api.ContainerPort{ + { + ContainerPort: int32(port), + }, + } + if hostPort > 0 { + podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) + } + } + return nil +} + +func updateV1PodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) { + port := -1 + hostPort := -1 + if len(params["port"]) > 0 { + port, err = strconv.Atoi(params["port"]) + if err != nil { + return err + } + } + + if len(params["hostport"]) > 0 { + hostPort, err = strconv.Atoi(params["hostport"]) + if err != nil { + return err + } + if hostPort > 0 && port < 0 { + return fmt.Errorf("--hostport requires --port to be specified") + } + } + + // Don't include the port if it was not specified. + if port > 0 { + podSpec.Containers[0].Ports = []v1.ContainerPort{ + { + ContainerPort: int32(port), + }, + } + if hostPort > 0 { + podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) + } + } + return nil +} + +type BasicPod struct{} + +func (BasicPod) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"labels", false}, + {"default-name", false}, + {"name", true}, + {"image", true}, + {"port", false}, + {"hostport", false}, + {"stdin", false}, + {"leave-stdin-open", false}, + {"tty", false}, + {"restart", false}, + {"command", false}, + {"args", false}, + {"env", false}, + {"requests", false}, + {"limits", false}, + } +} + +func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + args, err := getArgs(genericParams) + if err != nil { + return nil, err + } + + envs, err := getEnvs(genericParams) + if err != nil { + return nil, err + } + + params, err := getParams(genericParams) + if err != nil { + return nil, err + } + + name, err := getName(params) + if err != nil { + return nil, err + } + + labels, err := getLabels(params, false, name) + if err != nil { + return nil, err + } + + stdin, err := GetBool(params, "stdin", false) + if err != nil { + return nil, err + } + leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) + if err != nil { + return nil, err + } + + tty, err := GetBool(params, "tty", false) + if err != nil { + return nil, err + } + + resourceRequirements, err := HandleResourceRequirements(params) + if err != nil { + return nil, err + } + + restartPolicy := api.RestartPolicy(params["restart"]) + if len(restartPolicy) == 0 { + restartPolicy = api.RestartPolicyAlways + } + pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: name, + Image: params["image"], + ImagePullPolicy: api.PullIfNotPresent, + Stdin: stdin, + StdinOnce: !leaveStdinOpen && stdin, + TTY: tty, + Resources: resourceRequirements, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: restartPolicy, + }, + } + if err = updatePodContainers(params, args, envs, 
&pod.Spec); err != nil { + return nil, err + } + + if err := updatePodPorts(params, &pod.Spec); err != nil { + return nil, err + } + return &pod, nil +} + +func parseEnvs(envArray []string) ([]api.EnvVar, error) { + envs := make([]api.EnvVar, 0, len(envArray)) + for _, env := range envArray { + pos := strings.Index(env, "=") + if pos == -1 { + return nil, fmt.Errorf("invalid env: %v", env) + } + name := env[:pos] + value := env[pos+1:] + if len(name) == 0 || !validation.IsCIdentifier(name) || len(value) == 0 { + return nil, fmt.Errorf("invalid env: %v", env) + } + envVar := api.EnvVar{Name: name, Value: value} + envs = append(envs, envVar) + } + return envs, nil +} + +func parseV1Envs(envArray []string) ([]v1.EnvVar, error) { + envs := []v1.EnvVar{} + for _, env := range envArray { + pos := strings.Index(env, "=") + if pos == -1 { + return nil, fmt.Errorf("invalid env: %v", env) + } + name := env[:pos] + value := env[pos+1:] + if len(name) == 0 || !validation.IsCIdentifier(name) || len(value) == 0 { + return nil, fmt.Errorf("invalid env: %v", env) + } + envVar := v1.EnvVar{Name: name, Value: value} + envs = append(envs, envVar) + } + return envs, nil +} + +func newBool(val bool) *bool { + p := new(bool) + *p = val + return p +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go new file mode 100644 index 00000000..f8aba07e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -0,0 +1,389 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "strconv" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/util/wait" +) + +// Scaler provides an interface for resources that can be scaled. +type Scaler interface { + // Scale scales the named resource after checking preconditions. It optionally + // retries in the event of resource version mismatch (if retry is not nil), + // and optionally waits until the status of the resource matches newSize (if wait is not nil) + Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error + // ScaleSimple does a simple one-shot attempt at scaling - not useful on it's own, but + // a necessary building block for Scale + ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error +} + +func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) { + switch kind { + case api.Kind("ReplicationController"): + return &ReplicationControllerScaler{c}, nil + case extensions.Kind("ReplicaSet"): + return &ReplicaSetScaler{c.Extensions()}, nil + case extensions.Kind("Job"), batch.Kind("Job"): + return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. 
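// Illustrative sketch (not from the upstream file): callers obtain a Scaler from
// ScalerFor and drive it through Scale with retry and wait parameters. The client "c",
// namespace, name, target size and durations below are assumptions for the example.
//
//	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
//	if err != nil {
//		return err
//	}
//	retry := kubectl.NewRetryParams(time.Second, time.Minute)
//	waitForReplicas := kubectl.NewRetryParams(time.Second, 5*time.Minute)
//	return scaler.Scale("default", "my-rc", 3, nil, retry, waitForReplicas)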
+ case extensions.Kind("Deployment"): + return &DeploymentScaler{c.Extensions()}, nil + } + return nil, fmt.Errorf("no scaler has been implemented for %q", kind) +} + +// ScalePrecondition describes a condition that must be true for the scale to take place +// If CurrentSize == -1, it is ignored. +// If CurrentResourceVersion is the empty string, it is ignored. +// Otherwise they must equal the values in the resource for it to be valid. +type ScalePrecondition struct { + Size int + ResourceVersion string +} + +// A PreconditionError is returned when a resource fails to match +// the scale preconditions passed to kubectl. +type PreconditionError struct { + Precondition string + ExpectedValue string + ActualValue string +} + +func (pe PreconditionError) Error() string { + return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) +} + +type ScaleErrorType int + +const ( + ScaleGetFailure ScaleErrorType = iota + ScaleUpdateFailure + ScaleUpdateConflictFailure +) + +// A ScaleError is returned when a scale request passes +// preconditions but fails to actually scale the controller. +type ScaleError struct { + FailureType ScaleErrorType + ResourceVersion string + ActualError error +} + +func (c ScaleError) Error() string { + return fmt.Sprintf( + "Scaling the resource failed with: %v; Current resource version %s", + c.ActualError, c.ResourceVersion) +} + +// RetryParams encapsulates the retry parameters used by kubectl's scaler. +type RetryParams struct { + Interval, Timeout time.Duration +} + +func NewRetryParams(interval, timeout time.Duration) *RetryParams { + return &RetryParams{interval, timeout} +} + +// ScaleCondition is a closure around Scale that facilitates retries via util.wait +func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint) wait.ConditionFunc { + return func() (bool, error) { + err := r.ScaleSimple(namespace, name, precondition, count) + switch e, _ := err.(ScaleError); err.(type) { + case nil: + return true, nil + case ScaleError: + // Retry only on update conflicts. + if e.FailureType == ScaleUpdateConflictFailure { + return false, nil + } + } + return false, err + } +} + +// ValidateReplicationController ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise +func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { + if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} + } + if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} + } + return nil +} + +type ReplicationControllerScaler struct { + c client.Interface +} + +func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { + controller, err := scaler.c.ReplicationControllers(namespace).Get(name) + if err != nil { + return ScaleError{ScaleGetFailure, "Unknown", err} + } + if preconditions != nil { + if err := preconditions.ValidateReplicationController(controller); err != nil { + return err + } + } + controller.Spec.Replicas = int32(newSize) + if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil { + if errors.IsConflict(err) { + return ScaleError{ScaleUpdateConflictFailure, controller.ResourceVersion, err} + } + return ScaleError{ScaleUpdateFailure, controller.ResourceVersion, err} + } + return nil +} + +// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil), +// optional retries (if retry is not nil), and then optionally waits for it's replica count to reach the new value +// (if wait is not nil). +func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // Make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) + if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + rc, err := scaler.c.ReplicationControllers(namespace).Get(name) + if err != nil { + return err + } + err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ControllerHasDesiredReplicas(scaler.c, rc)) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", name) + } + return err + } + return nil +} + +// ValidateReplicaSet ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise +func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { + if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} + } + if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} + } + return nil +} + +type ReplicaSetScaler struct { + c client.ExtensionsInterface +} + +func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { + rs, err := scaler.c.ReplicaSets(namespace).Get(name) + if err != nil { + return ScaleError{ScaleGetFailure, "Unknown", err} + } + if preconditions != nil { + if err := preconditions.ValidateReplicaSet(rs); err != nil { + return err + } + } + rs.Spec.Replicas = int32(newSize) + if _, err := scaler.c.ReplicaSets(namespace).Update(rs); err != nil { + if errors.IsConflict(err) { + return ScaleError{ScaleUpdateConflictFailure, rs.ResourceVersion, err} + } + return ScaleError{ScaleUpdateFailure, rs.ResourceVersion, err} + } + return nil +} + +// Scale updates a ReplicaSet to a new size, with optional precondition check (if preconditions is +// not nil), optional retries (if retry is not nil), and then optionally waits for it's replica +// count to reach the new value (if wait is not nil). +func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // Make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) + if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + rs, err := scaler.c.ReplicaSets(namespace).Get(name) + if err != nil { + return err + } + err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs)) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", name) + } + return err + } + return nil +} + +// ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. +func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { + if precondition.Size != -1 && job.Spec.Parallelism == nil { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} + } + if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { + return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} + } + if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} + } + return nil +} + +type JobScaler struct { + c client.BatchInterface +} + +// ScaleSimple is responsible for updating job's parallelism. 
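// Illustrative sketch (not from the upstream file): a ScalePrecondition guards a scale
// against concurrent edits. The scaler variable "jobScaler", the size, resource version,
// namespace and job name below are assumptions for the example.
//
//	pre := &kubectl.ScalePrecondition{Size: 2, ResourceVersion: "12345"}
//	if err := jobScaler.ScaleSimple("default", "my-job", pre, 5); err != nil {
//		// A PreconditionError is returned when the live job no longer has
//		// parallelism 2 or its resource version has moved past "12345".
//		return err
//	}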
+func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { + job, err := scaler.c.Jobs(namespace).Get(name) + if err != nil { + return ScaleError{ScaleGetFailure, "Unknown", err} + } + if preconditions != nil { + if err := preconditions.ValidateJob(job); err != nil { + return err + } + } + parallelism := int32(newSize) + job.Spec.Parallelism = ¶llelism + if _, err := scaler.c.Jobs(namespace).Update(job); err != nil { + if errors.IsConflict(err) { + return ScaleError{ScaleUpdateConflictFailure, job.ResourceVersion, err} + } + return ScaleError{ScaleUpdateFailure, job.ResourceVersion, err} + } + return nil +} + +// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil), +// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired +// number, which can be less than requested based on job's current progress. +func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // Make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) + if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + job, err := scaler.c.Jobs(namespace).Get(name) + if err != nil { + return err + } + err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.JobHasDesiredParallelism(scaler.c, job)) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", name) + } + return err + } + return nil +} + +// ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. +func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { + if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { + return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} + } + if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { + return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} + } + return nil +} + +type DeploymentScaler struct { + c client.ExtensionsInterface +} + +// ScaleSimple is responsible for updating a deployment's desired replicas count. +func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { + deployment, err := scaler.c.Deployments(namespace).Get(name) + if err != nil { + return ScaleError{ScaleGetFailure, "Unknown", err} + } + if preconditions != nil { + if err := preconditions.ValidateDeployment(deployment); err != nil { + return err + } + } + + // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). + // For now I'm falling back to regular Deployment update operation. 
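	// Illustrative note (not from the upstream file): when the Update below hits a
	// resourceVersion conflict it is surfaced as ScaleError{ScaleUpdateConflictFailure, ...},
	// which is the one failure ScaleCondition treats as retryable, e.g.:
	//
	//	cond := ScaleCondition(scaler, nil, "default", "my-deployment", 3)
	//	err := wait.Poll(time.Second, time.Minute, cond) // retries only on update conflicts
	//
	// The namespace, name, size and durations are assumptions for the example.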
+ deployment.Spec.Replicas = int32(newSize) + if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil { + if errors.IsConflict(err) { + return ScaleError{ScaleUpdateConflictFailure, deployment.ResourceVersion, err} + } + return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err} + } + return nil +} + +// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil), +// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. +func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { + if preconditions == nil { + preconditions = &ScalePrecondition{-1, ""} + } + if retry == nil { + // Make it try only once, immediately + retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} + } + cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) + if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { + return err + } + if waitForReplicas != nil { + deployment, err := scaler.c.Deployments(namespace).Get(name) + if err != nil { + return err + } + err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.DeploymentHasDesiredReplicas(scaler.c, deployment)) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("timed out waiting for %q to be synced", name) + } + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go new file mode 100644 index 00000000..e5b7cc33 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go @@ -0,0 +1,207 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" +) + +// SecretGeneratorV1 supports stable generation of an opaque secret +type SecretGeneratorV1 struct { + // Name of secret (required) + Name string + // Type of secret (optional) + Type string + // FileSources to derive the secret from (optional) + FileSources []string + // LiteralSources to derive the secret from (optional) + LiteralSources []string +} + +// Ensure it supports the generator pattern that uses parameter injection +var _ Generator = &SecretGeneratorV1{} + +// Ensure it supports the generator pattern that uses parameters specified during construction +var _ StructuredGenerator = &SecretGeneratorV1{} + +// Generate returns a secret using the specified parameters +func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + err := ValidateParams(s.ParamNames(), genericParams) + if err != nil { + return nil, err + } + delegate := &SecretGeneratorV1{} + fromFileStrings, found := genericParams["from-file"] + if found { + fromFileArray, isArray := fromFileStrings.([]string) + if !isArray { + return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) + } + delegate.FileSources = fromFileArray + delete(genericParams, "from-file") + } + fromLiteralStrings, found := genericParams["from-literal"] + if found { + fromLiteralArray, isArray := fromLiteralStrings.([]string) + if !isArray { + return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) + } + delegate.LiteralSources = fromLiteralArray + delete(genericParams, "from-literal") + } + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + delegate.Name = params["name"] + delegate.Type = params["type"] + return delegate.StructuredGenerate() +} + +// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern +func (s SecretGeneratorV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + {"type", false}, + {"from-file", false}, + {"from-literal", false}, + {"force", false}, + } +} + +// StructuredGenerate outputs a secret object using the configured fields +func (s SecretGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := s.validate(); err != nil { + return nil, err + } + secret := &api.Secret{} + secret.Name = s.Name + secret.Data = map[string][]byte{} + if len(s.Type) > 0 { + secret.Type = api.SecretType(s.Type) + } + if len(s.FileSources) > 0 { + if err := handleFromFileSources(secret, s.FileSources); err != nil { + return nil, err + } + } + if len(s.LiteralSources) > 0 { + if err := handleFromLiteralSources(secret, s.LiteralSources); err != nil { + return nil, err + } + } + return secret, nil +} + +// validate validates required fields are set to support structured generation +func (s SecretGeneratorV1) validate() error { + if len(s.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} + +// handleFromLiteralSources adds the specified literal source information into the provided secret +func handleFromLiteralSources(secret *api.Secret, literalSources []string) error { + for _, literalSource := range literalSources { + keyName, value, err := parseLiteralSource(literalSource) + if 
err != nil { + return err + } + err = addKeyFromLiteralToSecret(secret, keyName, []byte(value)) + if err != nil { + return err + } + } + return nil +} + +// handleFromFileSources adds the specified file source information into the provided secret +func handleFromFileSources(secret *api.Secret, fileSources []string) error { + for _, fileSource := range fileSources { + keyName, filePath, err := parseFileSource(fileSource) + if err != nil { + return err + } + info, err := os.Stat(filePath) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", filePath, err.Err) + default: + return fmt.Errorf("error reading %s: %v", filePath, err) + } + } + if info.IsDir() { + if strings.Contains(fileSource, "=") { + return fmt.Errorf("cannot give a key name for a directory path.") + } + fileList, err := ioutil.ReadDir(filePath) + if err != nil { + return fmt.Errorf("error listing files in %s: %v", filePath, err) + } + for _, item := range fileList { + itemPath := path.Join(filePath, item.Name()) + if item.Mode().IsRegular() { + keyName = item.Name() + err = addKeyFromFileToSecret(secret, keyName, itemPath) + if err != nil { + return err + } + } + } + } else { + err = addKeyFromFileToSecret(secret, keyName, filePath) + if err != nil { + return err + } + } + } + + return nil +} + +func addKeyFromFileToSecret(secret *api.Secret, keyName, filePath string) error { + data, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + return addKeyFromLiteralToSecret(secret, keyName, data) +} + +func addKeyFromLiteralToSecret(secret *api.Secret, keyName string, data []byte) error { + if !validation.IsSecretKey(keyName) { + return fmt.Errorf("%v is not a valid key name for a secret", keyName) + } + if _, entryExists := secret.Data[keyName]; entryExists { + return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, secret.Data) + } + secret.Data[keyName] = data + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go new file mode 100644 index 00000000..773bde38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "encoding/json" + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/credentialprovider" + "k8s.io/kubernetes/pkg/runtime" +) + +// SecretForDockerRegistryGeneratorV1 supports stable generation of a docker registry secret +type SecretForDockerRegistryGeneratorV1 struct { + // Name of secret (required) + Name string + // Username for registry (required) + Username string + // Email for registry (required) + Email string + // Password for registry (required) + Password string + // Server for registry (required) + Server string +} + +// Ensure it supports the generator pattern that uses parameter injection +var _ Generator = &SecretForDockerRegistryGeneratorV1{} + +// Ensure it supports the generator pattern that uses parameters specified during construction +var _ StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} + +// Generate returns a secret using the specified parameters +func (s SecretForDockerRegistryGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + err := ValidateParams(s.ParamNames(), genericParams) + if err != nil { + return nil, err + } + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + delegate := &SecretForDockerRegistryGeneratorV1{ + Name: params["name"], + Username: params["docker-username"], + Email: params["docker-email"], + Password: params["docker-password"], + Server: params["docker-server"], + } + return delegate.StructuredGenerate() +} + +// StructuredGenerate outputs a secret object using the configured fields +func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := s.validate(); err != nil { + return nil, err + } + dockercfgContent, err := handleDockercfgContent(s.Username, s.Password, s.Email, s.Server) + if err != nil { + return nil, err + } + secret := &api.Secret{} + secret.Name = s.Name + secret.Type = api.SecretTypeDockercfg + secret.Data = map[string][]byte{} + secret.Data[api.DockerConfigKey] = dockercfgContent + return secret, nil +} + +// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern +func (s SecretForDockerRegistryGeneratorV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + {"docker-username", true}, + {"docker-email", true}, + {"docker-password", true}, + {"docker-server", true}, + } +} + +// validate validates required fields are set to support structured generation +func (s SecretForDockerRegistryGeneratorV1) validate() error { + if len(s.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if len(s.Username) == 0 { + return fmt.Errorf("username must be specified") + } + if len(s.Email) == 0 { + return fmt.Errorf("email must be specified") + } + if len(s.Password) == 0 { + return fmt.Errorf("password must be specified") + } + if len(s.Server) == 0 { + return fmt.Errorf("server must be specified") + } + return nil +} + +// handleDockercfgContent serializes a dockercfg json file +func handleDockercfgContent(username, password, email, server string) ([]byte, error) { + dockercfgAuth := credentialprovider.DockerConfigEntry{ + Username: username, + Password: password, + Email: email, + } + + dockerCfg := map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth} + + return json.Marshal(dockerCfg) +} diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go new file mode 100644 index 00000000..05061d25 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go @@ -0,0 +1,124 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +// SecretForTLSGeneratorV1 supports stable generation of a TLS secret. +type SecretForTLSGeneratorV1 struct { + // Name is the name of this TLS secret. + Name string + // Key is the path to the user's private key. + Key string + // Cert is the path to the user's public key certificate. + Cert string +} + +// Ensure it supports the generator pattern that uses parameter injection +var _ Generator = &SecretForTLSGeneratorV1{} + +// Ensure it supports the generator pattern that uses parameters specified during construction +var _ StructuredGenerator = &SecretForTLSGeneratorV1{} + +// Generate returns a secret using the specified parameters +func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { + err := ValidateParams(s.ParamNames(), genericParams) + if err != nil { + return nil, err + } + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + delegate := &SecretForTLSGeneratorV1{ + Name: params["name"], + Key: params["key"], + Cert: params["cert"], + } + return delegate.StructuredGenerate() +} + +// StructuredGenerate outputs a secret object using the configured fields +func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := s.validate(); err != nil { + return nil, err + } + tlsCrt, err := readFile(s.Cert) + if err != nil { + return nil, err + } + tlsKey, err := readFile(s.Key) + if err != nil { + return nil, err + } + secret := &api.Secret{} + secret.Name = s.Name + secret.Type = api.SecretTypeTLS + secret.Data = map[string][]byte{} + secret.Data[api.TLSCertKey] = []byte(tlsCrt) + secret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey) + return secret, nil +} + +// readFile just reads a file into a byte array. +func readFile(file string) ([]byte, error) { + b, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err) + } + return b, nil +} + +// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern +func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + {"key", true}, + {"cert", true}, + } +} + +// validate validates required fields are set to support structured generation +func (s SecretForTLSGeneratorV1) validate() error { + // TODO: This is not strictly necessary. 
We can generate a self signed cert + // if no key/cert is given. The only requiredment is that we either get both + // or none. See test/e2e/ingress_utils for self signed cert generation. + if len(s.Key) == 0 { + return fmt.Errorf("key must be specified.") + } + if len(s.Cert) == 0 { + return fmt.Errorf("certificate must be specified.") + } + if _, err := tls.LoadX509KeyPair(s.Cert, s.Key); err != nil { + return fmt.Errorf("failed to load key pair %v", err) + } + // TODO: Add more validation. + // 1. If the certificate contains intermediates, it is a valid chain. + // 2. Format etc. + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go new file mode 100644 index 00000000..e8ef5dba --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go @@ -0,0 +1,229 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/intstr" +) + +// The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. +type ServiceGeneratorV1 struct{} + +func (ServiceGeneratorV1) ParamNames() []GeneratorParam { + return paramNames() +} + +func (ServiceGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { + params["port-name"] = "default" + return generate(params) +} + +type ServiceGeneratorV2 struct{} + +func (ServiceGeneratorV2) ParamNames() []GeneratorParam { + return paramNames() +} + +func (ServiceGeneratorV2) Generate(params map[string]interface{}) (runtime.Object, error) { + return generate(params) +} + +func paramNames() []GeneratorParam { + return []GeneratorParam{ + {"default-name", true}, + {"name", false}, + {"selector", true}, + // port will be used if a user specifies --port OR the exposed object + // has one port + {"port", false}, + // ports will be used iff a user doesn't specify --port AND the + // exposed object has multiple ports + {"ports", false}, + {"labels", false}, + {"external-ip", false}, + {"create-external-load-balancer", false}, + {"load-balancer-ip", false}, + {"type", false}, + {"protocol", false}, + // protocols will be used to keep port-protocol mapping derived from + // exposed object + {"protocols", false}, + {"container-port", false}, // alias of target-port + {"target-port", false}, + {"port-name", false}, + {"session-affinity", false}, + } +} + +func generate(genericParams map[string]interface{}) (runtime.Object, error) { + params := map[string]string{} + for key, value := range genericParams { + strVal, isString := value.(string) + if !isString { + return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) + } + params[key] = strVal + } + selectorString, found := params["selector"] + if !found || len(selectorString) == 0 { + return nil, fmt.Errorf("'selector' is a required parameter.") + } + 
selector, err := ParseLabels(selectorString) + if err != nil { + return nil, err + } + + labelsString, found := params["labels"] + var labels map[string]string + if found && len(labelsString) > 0 { + labels, err = ParseLabels(labelsString) + if err != nil { + return nil, err + } + } + + name, found := params["name"] + if !found || len(name) == 0 { + name, found = params["default-name"] + if !found || len(name) == 0 { + return nil, fmt.Errorf("'name' is a required parameter.") + } + } + ports := []api.ServicePort{} + servicePortName, found := params["port-name"] + if !found { + // Leave the port unnamed. + servicePortName = "" + } + + protocolsString, found := params["protocols"] + var portProtocolMap map[string]string + if found && len(protocolsString) > 0 { + portProtocolMap, err = ParseProtocols(protocolsString) + if err != nil { + return nil, err + } + } + // ports takes precedence over port since it will be + // specified only when the user hasn't specified a port + // via --port and the exposed object has multiple ports. + var portString string + if portString, found = params["ports"]; !found { + portString, found = params["port"] + if !found { + return nil, fmt.Errorf("'port' is a required parameter.") + } + } + + portStringSlice := strings.Split(portString, ",") + for i, stillPortString := range portStringSlice { + port, err := strconv.Atoi(stillPortString) + if err != nil { + return nil, err + } + name := servicePortName + // If we are going to assign multiple ports to a service, we need to + // generate a different name for each one. + if len(portStringSlice) > 1 { + name = fmt.Sprintf("port-%d", i+1) + } + protocol := params["protocol"] + + switch { + case len(protocol) == 0 && len(portProtocolMap) == 0: + // Default to TCP, what the flag was doing previously. + protocol = "TCP" + case len(protocol) > 0 && len(portProtocolMap) > 0: + // User has specified the --protocol while exposing a multiprotocol resource + // We should stomp multiple protocols with the one specified ie. 
do nothing + case len(protocol) == 0 && len(portProtocolMap) > 0: + // no --protocol and we expose a multiprotocol resource + protocol = "TCP" // have the default so we can stay sane + if exposeProtocol, found := portProtocolMap[stillPortString]; found { + protocol = exposeProtocol + } + } + ports = append(ports, api.ServicePort{ + Name: name, + Port: int32(port), + Protocol: api.Protocol(protocol), + }) + } + + service := api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: api.ServiceSpec{ + Selector: selector, + Ports: ports, + }, + } + targetPortString := params["target-port"] + if len(targetPortString) == 0 { + targetPortString = params["container-port"] + } + if len(targetPortString) > 0 { + var targetPort intstr.IntOrString + if portNum, err := strconv.Atoi(targetPortString); err != nil { + targetPort = intstr.FromString(targetPortString) + } else { + targetPort = intstr.FromInt(portNum) + } + // Use the same target-port for every port + for i := range service.Spec.Ports { + service.Spec.Ports[i].TargetPort = targetPort + } + } else { + // If --target-port or --container-port haven't been specified, this + // should be the same as Port + for i := range service.Spec.Ports { + port := service.Spec.Ports[i].Port + service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) + } + } + if params["create-external-load-balancer"] == "true" { + service.Spec.Type = api.ServiceTypeLoadBalancer + } + if len(params["external-ip"]) > 0 { + service.Spec.ExternalIPs = []string{params["external-ip"]} + } + if len(params["type"]) != 0 { + service.Spec.Type = api.ServiceType(params["type"]) + } + if service.Spec.Type == api.ServiceTypeLoadBalancer { + service.Spec.LoadBalancerIP = params["load-balancer-ip"] + } + if len(params["session-affinity"]) != 0 { + switch api.ServiceAffinity(params["session-affinity"]) { + case api.ServiceAffinityNone: + service.Spec.SessionAffinity = api.ServiceAffinityNone + case api.ServiceAffinityClientIP: + service.Spec.SessionAffinity = api.ServiceAffinityClientIP + default: + return nil, fmt.Errorf("unknown session affinity: %s", params["session-affinity"]) + } + } + return &service, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go new file mode 100644 index 00000000..2be08dd2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/runtime" +) + +// ServiceAccountGeneratorV1 supports stable generation of a service account +type ServiceAccountGeneratorV1 struct { + // Name of service account + Name string +} + +// Ensure it supports the generator pattern that uses parameters specified during construction +var _ StructuredGenerator = &ServiceAccountGeneratorV1{} + +// StructuredGenerate outputs a service account object using the configured fields +func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) { + if err := g.validate(); err != nil { + return nil, err + } + serviceAccount := &api.ServiceAccount{} + serviceAccount.Name = g.Name + return serviceAccount, nil +} + +// validate validates required fields are set to support structured generation +func (g *ServiceAccountGeneratorV1) validate() error { + if len(g.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go new file mode 100644 index 00000000..568c46d6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go @@ -0,0 +1,36 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "k8s.io/kubernetes/pkg/api" +) + +// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field +type SortableEvents []api.Event + +func (list SortableEvents) Len() int { + return len(list) +} + +func (list SortableEvents) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableEvents) Less(i, j int) bool { + return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go new file mode 100644 index 00000000..98c67344 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go @@ -0,0 +1,72 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
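Editor's note on SortableEvents above: it is the standard sort.Interface idiom, wrap the slice and compare on one field. A minimal, self-contained analog (a local event type instead of the vendored api.Event) shows how a caller would order events by their last-seen time; the type names here are illustrative only.

package main

import (
	"fmt"
	"sort"
	"time"
)

// event stands in for api.Event; only the field used by Less matters here.
type event struct {
	Reason        string
	LastTimestamp time.Time
}

// byLastTimestamp mirrors SortableEvents: Len/Swap/Less over a slice.
type byLastTimestamp []event

func (l byLastTimestamp) Len() int      { return len(l) }
func (l byLastTimestamp) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byLastTimestamp) Less(i, j int) bool {
	return l[i].LastTimestamp.Before(l[j].LastTimestamp)
}

func main() {
	now := time.Now()
	events := []event{
		{"Killing", now},
		{"Scheduled", now.Add(-2 * time.Minute)},
		{"Pulled", now.Add(-1 * time.Minute)},
	}
	sort.Sort(byLastTimestamp(events)) // oldest event first
	for _, e := range events {
		fmt.Println(e.Reason)
	}
}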
+*/ + +package kubectl + +import ( + "sort" + + "k8s.io/kubernetes/pkg/api" + qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" +) + +type SortableResourceNames []api.ResourceName + +func (list SortableResourceNames) Len() int { + return len(list) +} + +func (list SortableResourceNames) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableResourceNames) Less(i, j int) bool { + return list[i] < list[j] +} + +// SortedResourceNames returns the sorted resource names of a resource list. +func SortedResourceNames(list api.ResourceList) []api.ResourceName { + resources := make([]api.ResourceName, 0, len(list)) + for res := range list { + resources = append(resources, res) + } + sort.Sort(SortableResourceNames(resources)) + return resources +} + +type SortableResourceQuotas []api.ResourceQuota + +func (list SortableResourceQuotas) Len() int { + return len(list) +} + +func (list SortableResourceQuotas) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableResourceQuotas) Less(i, j int) bool { + return list[i].Name < list[j].Name +} + +// SortedQoSResourceNames returns the sorted resource names of a QoS list. +func SortedQoSResourceNames(list qosutil.QoSList) []api.ResourceName { + resources := make([]api.ResourceName, 0, len(list)) + for res := range list { + resources = append(resources, res) + } + sort.Sort(SortableResourceNames(resources)) + return resources +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go new file mode 100644 index 00000000..e95b8a1e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go @@ -0,0 +1,219 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "io" + "reflect" + "sort" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/integer" + "k8s.io/kubernetes/pkg/util/jsonpath" + + "github.com/golang/glog" +) + +// Sorting printer sorts list types before delegating to another printer. 
+// Non-list types are simply passed through +type SortingPrinter struct { + SortField string + Delegate ResourcePrinter + Decoder runtime.Decoder +} + +func (s *SortingPrinter) PrintObj(obj runtime.Object, out io.Writer) error { + if !meta.IsListType(obj) { + return s.Delegate.PrintObj(obj, out) + } + + if err := s.sortObj(obj); err != nil { + return err + } + return s.Delegate.PrintObj(obj, out) +} + +// TODO: implement HandledResources() +func (p *SortingPrinter) HandledResources() []string { + return []string{} +} + +func (s *SortingPrinter) sortObj(obj runtime.Object) error { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + if len(objs) == 0 { + return nil + } + + sorter, err := SortObjects(s.Decoder, objs, s.SortField) + if err != nil { + return err + } + + switch list := obj.(type) { + case *v1.List: + outputList := make([]runtime.RawExtension, len(objs)) + for ix := range objs { + outputList[ix] = list.Items[sorter.OriginalPosition(ix)] + } + list.Items = outputList + return nil + } + return meta.SetList(obj, objs) +} + +func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput string) (*RuntimeSort, error) { + parser := jsonpath.New("sorting") + + field, err := massageJSONPath(fieldInput) + if err != nil { + return nil, err + } + + if err := parser.Parse(field); err != nil { + return nil, err + } + + for ix := range objs { + item := objs[ix] + switch u := item.(type) { + case *runtime.Unknown: + var err error + if objs[ix], _, err = decoder.Decode(u.Raw, nil, nil); err != nil { + return nil, err + } + } + } + + values, err := parser.FindResults(reflect.ValueOf(objs[0]).Elem().Interface()) + if err != nil { + return nil, err + } + if len(values) == 0 { + return nil, fmt.Errorf("couldn't find any field with path: %s", field) + } + + sorter := NewRuntimeSort(field, objs) + sort.Sort(sorter) + return sorter, nil +} + +// RuntimeSort is an implementation of the golang sort interface that knows how to sort +// lists of runtime.Object +type RuntimeSort struct { + field string + objs []runtime.Object + origPosition []int +} + +func NewRuntimeSort(field string, objs []runtime.Object) *RuntimeSort { + sorter := &RuntimeSort{field: field, objs: objs, origPosition: make([]int, len(objs))} + for ix := range objs { + sorter.origPosition[ix] = ix + } + return sorter +} + +func (r *RuntimeSort) Len() int { + return len(r.objs) +} + +func (r *RuntimeSort) Swap(i, j int) { + r.objs[i], r.objs[j] = r.objs[j], r.objs[i] + r.origPosition[i], r.origPosition[j] = r.origPosition[j], r.origPosition[i] +} + +func isLess(i, j reflect.Value) (bool, error) { + switch i.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return i.Int() < j.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return i.Uint() < j.Uint(), nil + case reflect.Float32, reflect.Float64: + return i.Float() < j.Float(), nil + case reflect.String: + return i.String() < j.String(), nil + case reflect.Ptr: + return isLess(i.Elem(), j.Elem()) + case reflect.Struct: + // sort unversioned.Time + in := i.Interface() + if t, ok := in.(unversioned.Time); ok { + return t.Before(j.Interface().(unversioned.Time)), nil + } + // fallback to the fields comparison + for idx := 0; idx < i.NumField(); idx++ { + less, err := isLess(i.Field(idx), j.Field(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil + case reflect.Array, reflect.Slice: + // note: the length of i and j may be different + for idx := 0; 
idx < integer.IntMin(i.Len(), j.Len()); idx++ { + less, err := isLess(i.Index(idx), j.Index(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil + default: + return false, fmt.Errorf("unsortable type: %v", i.Kind()) + } +} + +func (r *RuntimeSort) Less(i, j int) bool { + iObj := r.objs[i] + jObj := r.objs[j] + + parser := jsonpath.New("sorting") + parser.Parse(r.field) + + iValues, err := parser.FindResults(reflect.ValueOf(iObj).Elem().Interface()) + if err != nil { + glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) + } + jValues, err := parser.FindResults(reflect.ValueOf(jObj).Elem().Interface()) + if err != nil { + glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) + } + + iField := iValues[0][0] + jField := jValues[0][0] + + less, err := isLess(iField, jField) + if err != nil { + glog.Fatalf("Field %s in %v is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) + } + return less +} + +// Returns the starting (original) position of a particular index. e.g. If OriginalPosition(0) returns 5 then the +// item currently at position 0 was at position 5 in the original unsorted array. +func (r *RuntimeSort) OriginalPosition(ix int) int { + if ix < 0 || ix >= len(r.origPosition) { + return -1 + } + return r.origPosition[ix] +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go b/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go new file mode 100644 index 00000000..d784ef24 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go @@ -0,0 +1,448 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + "strings" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/batch" + "k8s.io/kubernetes/pkg/apis/extensions" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util" + deploymentutil "k8s.io/kubernetes/pkg/util/deployment" + utilerrors "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + Interval = time.Second * 1 + Timeout = time.Minute * 5 +) + +// A Reaper handles terminating an object as gracefully as possible. +// timeout is how long we'll wait for the termination to be successful +// gracePeriod is time given to an API object for it to delete itself cleanly, +// e.g., pod shutdown. It may or may not be supported by the API object.
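Editor's note on RuntimeSort above: the part that is easy to miss is origPosition. Swap moves the bookkeeping slice in lockstep with the objects, so after sorting the caller can still map a sorted index back to where the item started, which sortObj uses to reorder the raw List items. A small self-contained sketch of just that bookkeeping, using plain strings instead of runtime.Object (the names trackedSort/newTrackedSort are illustrative only):

package main

import (
	"fmt"
	"sort"
)

// trackedSort sorts items while recording, for every post-sort index,
// the index the item occupied before sorting (the OriginalPosition idea).
type trackedSort struct {
	items []string
	orig  []int
}

func newTrackedSort(items []string) *trackedSort {
	t := &trackedSort{items: items, orig: make([]int, len(items))}
	for i := range items {
		t.orig[i] = i
	}
	return t
}

func (t *trackedSort) Len() int           { return len(t.items) }
func (t *trackedSort) Less(i, j int) bool { return t.items[i] < t.items[j] }
func (t *trackedSort) Swap(i, j int) {
	t.items[i], t.items[j] = t.items[j], t.items[i]
	t.orig[i], t.orig[j] = t.orig[j], t.orig[i] // keep positions in lockstep
}

func main() {
	t := newTrackedSort([]string{"pod-c", "pod-a", "pod-b"})
	sort.Sort(t)
	fmt.Println(t.items) // [pod-a pod-b pod-c]
	fmt.Println(t.orig)  // [1 2 0]: sorted slot 0 came from original index 1
}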
+type Reaper interface { + Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error +} + +type NoSuchReaperError struct { + kind unversioned.GroupKind +} + +func (n *NoSuchReaperError) Error() string { + return fmt.Sprintf("no reaper has been implemented for %v", n.kind) +} + +func IsNoSuchReaperError(err error) bool { + _, ok := err.(*NoSuchReaperError) + return ok +} + +func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) { + switch kind { + case api.Kind("ReplicationController"): + return &ReplicationControllerReaper{c, Interval, Timeout}, nil + + case extensions.Kind("ReplicaSet"): + return &ReplicaSetReaper{c, Interval, Timeout}, nil + + case extensions.Kind("DaemonSet"): + return &DaemonSetReaper{c, Interval, Timeout}, nil + + case api.Kind("Pod"): + return &PodReaper{c}, nil + + case api.Kind("Service"): + return &ServiceReaper{c}, nil + + case extensions.Kind("Job"), batch.Kind("Job"): + return &JobReaper{c, Interval, Timeout}, nil + + case extensions.Kind("Deployment"): + return &DeploymentReaper{c, Interval, Timeout}, nil + + } + return nil, &NoSuchReaperError{kind} +} + +func ReaperForReplicationController(c client.Interface, timeout time.Duration) (Reaper, error) { + return &ReplicationControllerReaper{c, Interval, timeout}, nil +} + +type ReplicationControllerReaper struct { + client.Interface + pollInterval, timeout time.Duration +} +type ReplicaSetReaper struct { + client.Interface + pollInterval, timeout time.Duration +} +type DaemonSetReaper struct { + client.Interface + pollInterval, timeout time.Duration +} +type JobReaper struct { + client.Interface + pollInterval, timeout time.Duration +} +type DeploymentReaper struct { + client.Interface + pollInterval, timeout time.Duration +} +type PodReaper struct { + client.Interface +} +type ServiceReaper struct { + client.Interface +} + +type objInterface interface { + Delete(name string) error + Get(name string) (meta.Object, error) +} + +// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. +func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) { + rcs, err := c.List(api.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("error getting replication controllers: %v", err) + } + var matchingRCs []api.ReplicationController + rcLabels := labels.Set(rc.Spec.Selector) + for _, controller := range rcs.Items { + newRCLabels := labels.Set(controller.Spec.Selector) + if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) { + matchingRCs = append(matchingRCs, controller) + } + } + return matchingRCs, nil +} + +func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + rc := reaper.ReplicationControllers(namespace) + scaler, err := ScalerFor(api.Kind("ReplicationController"), *reaper) + if err != nil { + return err + } + ctrl, err := rc.Get(name) + if err != nil { + return err + } + if timeout == 0 { + timeout = Timeout + time.Duration(10*ctrl.Spec.Replicas)*time.Second + } + + // The rc manager will try and detect all matching rcs for a pod's labels, + // and only sync the oldest one. This means if we have a pod with labels + // [(k1: v1), (k2: v2)] and two rcs: rc1 with selector [(k1=v1)], and rc2 with selector [(k1=v1),(k2=v2)], + // the rc manager will sync the older of the two rcs. 
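Editor's note on ReaperFor above: it is a plain factory switch that maps a GroupKind to the matching Reaper, so callers only ever see the one-method Stop interface. A minimal, self-contained version of the same lookup shape, with hypothetical kinds and reaper types rather than the vendored client plumbing:

package main

import (
	"errors"
	"fmt"
	"time"
)

// reaper mirrors the one-method Reaper contract from the diff.
type reaper interface {
	Stop(namespace, name string, timeout time.Duration) error
}

type podReaper struct{}
type serviceReaper struct{}

func (podReaper) Stop(ns, name string, _ time.Duration) error {
	fmt.Println("delete pod", ns, name)
	return nil
}

func (serviceReaper) Stop(ns, name string, _ time.Duration) error {
	fmt.Println("delete service", ns, name)
	return nil
}

// reaperFor is the factory: unknown kinds produce a "no reaper" error,
// analogous to ReaperFor returning NoSuchReaperError.
func reaperFor(kind string) (reaper, error) {
	switch kind {
	case "Pod":
		return podReaper{}, nil
	case "Service":
		return serviceReaper{}, nil
	}
	return nil, errors.New("no reaper has been implemented for " + kind)
}

func main() {
	r, err := reaperFor("Pod")
	if err == nil {
		_ = r.Stop("default", "web-1", time.Minute)
	}
}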
+ // + // If there are rcs with a superset of labels, eg: + // deleting: (k1=v1), superset: (k2=v2, k1=v1) + // - It isn't safe to delete the rc because there could be a pod with labels + // (k1=v1) that isn't managed by the superset rc. We can't scale it down + // either, because there could be a pod (k2=v2, k1=v1) that it deletes + // causing a fight with the superset rc. + // If there are rcs with a subset of labels, eg: + // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) + // - Even if it's safe to delete this rc without a scale down because all it's pods + // are being controlled by the subset rc the code returns an error. + + // In theory, creating overlapping controllers is user error, so the loop below + // tries to account for this logic only in the common case, where we end up + // with multiple rcs that have an exact match on selectors. + + overlappingCtrls, err := getOverlappingControllers(rc, ctrl) + if err != nil { + return fmt.Errorf("error getting replication controllers: %v", err) + } + exactMatchRCs := []api.ReplicationController{} + overlapRCs := []string{} + for _, overlappingRC := range overlappingCtrls { + if len(overlappingRC.Spec.Selector) == len(ctrl.Spec.Selector) { + exactMatchRCs = append(exactMatchRCs, overlappingRC) + } else { + overlapRCs = append(overlapRCs, overlappingRC.Name) + } + } + if len(overlapRCs) > 0 { + return fmt.Errorf( + "Detected overlapping controllers for rc %v: %v, please manage deletion individually with --cascade=false.", + ctrl.Name, strings.Join(overlapRCs, ",")) + } + if len(exactMatchRCs) == 1 { + // No overlapping controllers. + retry := NewRetryParams(reaper.pollInterval, reaper.timeout) + waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { + return err + } + } + return rc.Delete(name) +} + +// TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210 +// getOverlappingReplicaSets finds ReplicaSets that this ReplicaSet overlaps, as well as ReplicaSets overlapping this ReplicaSet. +func getOverlappingReplicaSets(c client.ReplicaSetInterface, rs *extensions.ReplicaSet) ([]extensions.ReplicaSet, []extensions.ReplicaSet, error) { + var overlappingRSs, exactMatchRSs []extensions.ReplicaSet + return overlappingRSs, exactMatchRSs, nil +} + +func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + rsc := reaper.Extensions().ReplicaSets(namespace) + scaler, err := ScalerFor(extensions.Kind("ReplicaSet"), *reaper) + if err != nil { + return err + } + rs, err := rsc.Get(name) + if err != nil { + return err + } + if timeout == 0 { + timeout = Timeout + time.Duration(10*rs.Spec.Replicas)*time.Second + } + + // The ReplicaSet controller will try and detect all matching ReplicaSets + // for a pod's labels, and only sync the oldest one. This means if we have + // a pod with labels [(k1: v1), (k2: v2)] and two ReplicaSets: rs1 with + // selector [(k1=v1)], and rs2 with selector [(k1=v1),(k2=v2)], the + // ReplicaSet controller will sync the older of the two ReplicaSets. + // + // If there are ReplicaSets with a superset of labels, eg: + // deleting: (k1=v1), superset: (k2=v2, k1=v1) + // - It isn't safe to delete the ReplicaSet because there could be a pod + // with labels (k1=v1) that isn't managed by the superset ReplicaSet. 
+ // We can't scale it down either, because there could be a pod + // (k2=v2, k1=v1) that it deletes causing a fight with the superset + // ReplicaSet. + // If there are ReplicaSets with a subset of labels, eg: + // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) + // - Even if it's safe to delete this ReplicaSet without a scale down because + // all its pods are being controlled by the subset ReplicaSet the code + // returns an error. + + // In theory, creating overlapping ReplicaSets is user error, so the loop below + // tries to account for this logic only in the common case, where we end up + // with multiple ReplicaSets that have an exact match on selectors. + + // TODO(madhusudancs): Re-evaluate again when controllerRef is implemented - + // https://github.com/kubernetes/kubernetes/issues/2210 + overlappingRSs, exactMatchRSs, err := getOverlappingReplicaSets(rsc, rs) + if err != nil { + return fmt.Errorf("error getting ReplicaSets: %v", err) + } + + if len(overlappingRSs) > 0 { + var names []string + for _, overlappingRS := range overlappingRSs { + names = append(names, overlappingRS.Name) + } + return fmt.Errorf( + "Detected overlapping ReplicaSets for ReplicaSet %v: %v, please manage deletion individually with --cascade=false.", + rs.Name, strings.Join(names, ",")) + } + if len(exactMatchRSs) == 0 { + // No overlapping ReplicaSets. + retry := NewRetryParams(reaper.pollInterval, reaper.timeout) + waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { + return err + } + } + + if err := rsc.Delete(name, nil); err != nil { + return err + } + return nil +} + +func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + ds, err := reaper.Extensions().DaemonSets(namespace).Get(name) + if err != nil { + return err + } + + // We set the nodeSelector to a random label. This label is nearly guaranteed + // to not be set on any node so the DaemonSetController will start deleting + // daemon pods. Once it's done deleting the daemon pods, it's safe to delete + // the DaemonSet. + ds.Spec.Template.Spec.NodeSelector = map[string]string{ + string(util.NewUUID()): string(util.NewUUID()), + } + // force update to avoid version conflict + ds.ResourceVersion = "" + + if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil { + return err + } + + // Wait for the daemon set controller to kill all the daemon pods.
+ if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) { + updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name) + if err != nil { + return false, nil + } + return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil + }); err != nil { + return err + } + + return reaper.Extensions().DaemonSets(namespace).Delete(name) +} + +func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + jobs := reaper.Batch().Jobs(namespace) + pods := reaper.Pods(namespace) + scaler, err := ScalerFor(batch.Kind("Job"), *reaper) + if err != nil { + return err + } + job, err := jobs.Get(name) + if err != nil { + return err + } + if timeout == 0 { + // we will never have more active pods than job.Spec.Parallelism + parallelism := *job.Spec.Parallelism + timeout = Timeout + time.Duration(10*parallelism)*time.Second + } + + // TODO: handle overlapping jobs + retry := NewRetryParams(reaper.pollInterval, reaper.timeout) + waitForJobs := NewRetryParams(reaper.pollInterval, timeout) + if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil { + return err + } + // at this point only dead pods are left, that should be removed + selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) + options := api.ListOptions{LabelSelector: selector} + podList, err := pods.List(options) + if err != nil { + return err + } + errList := []error{} + for _, pod := range podList.Items { + if err := pods.Delete(pod.Name, gracePeriod); err != nil { + // ignores the error when the pod isn't found + if !errors.IsNotFound(err) { + errList = append(errList, err) + } + } + } + if len(errList) > 0 { + return utilerrors.NewAggregate(errList) + } + // once we have all the pods removed we can safely remove the job itself + return jobs.Delete(name, nil) +} + +func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + deployments := reaper.Extensions().Deployments(namespace) + replicaSets := reaper.Extensions().ReplicaSets(namespace) + rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper) + + deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { + // set deployment's history and scale to 0 + // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 + d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) + d.Spec.Replicas = 0 + d.Spec.Paused = true + }) + if err != nil { + return err + } + + // Use observedGeneration to determine if the deployment controller noticed the pause. + if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { + return deployments.Get(name) + }, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil { + return err + } + + // Stop all replica sets. 
+ selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return err + } + + options := api.ListOptions{LabelSelector: selector} + rsList, err := replicaSets.List(options) + if err != nil { + return err + } + errList := []error{} + for _, rc := range rsList.Items { + if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { + scaleGetErr, ok := err.(*ScaleError) + if !errors.IsNotFound(err) || ok && !errors.IsNotFound(scaleGetErr.ActualError) { + errList = append(errList, err) + } + } + } + if len(errList) > 0 { + return utilerrors.NewAggregate(errList) + } + + // Delete deployment at the end. + // Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry. + return deployments.Delete(name, nil) +} + +type updateDeploymentFunc func(d *extensions.Deployment) + +func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { + deployments := reaper.Extensions().Deployments(namespace) + err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { + if deployment, err = deployments.Get(name); err != nil { + return false, err + } + // Apply the update, then attempt to push it to the apiserver. + applyUpdate(deployment) + if deployment, err = deployments.Update(deployment); err == nil { + return true, nil + } + return false, nil + }) + return deployment, err +} + +func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + pods := reaper.Pods(namespace) + _, err := pods.Get(name) + if err != nil { + return err + } + return pods.Delete(name, gracePeriod) +} + +func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { + services := reaper.Services(namespace) + _, err := services.Get(name) + if err != nil { + return err + } + return services.Delete(name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version.go b/vendor/k8s.io/kubernetes/pkg/kubectl/version.go new file mode 100644 index 00000000..4c39b3c9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/version.go @@ -0,0 +1,40 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
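Editor's note on updateDeploymentWithRetries above: it is the usual optimistic-concurrency loop, re-read the object, apply the mutation, try the update, and retry until the poll deadline. A self-contained sketch of that loop shape, with a toy in-memory store standing in for the Deployments client (all names here are hypothetical):

package main

import (
	"errors"
	"fmt"
	"time"
)

type deployment struct {
	ResourceVersion int
	Replicas        int
}

// store is a toy "apiserver": Update succeeds only if the caller saw the
// latest resource version, otherwise it reports a conflict.
type store struct{ current deployment }

func (s *store) Get() deployment { return s.current }
func (s *store) Update(d deployment) (deployment, error) {
	if d.ResourceVersion != s.current.ResourceVersion {
		return deployment{}, errors.New("conflict")
	}
	d.ResourceVersion++
	s.current = d
	return d, nil
}

// updateWithRetries mirrors the get/mutate/update/poll pattern from the diff.
func updateWithRetries(s *store, mutate func(*deployment), interval, timeout time.Duration) (deployment, error) {
	deadline := time.Now().Add(timeout)
	for {
		d := s.Get()
		mutate(&d)
		if updated, err := s.Update(d); err == nil {
			return updated, nil
		}
		if time.Now().After(deadline) {
			return deployment{}, errors.New("timed out waiting for update")
		}
		time.Sleep(interval)
	}
}

func main() {
	s := &store{current: deployment{ResourceVersion: 1, Replicas: 3}}
	d, err := updateWithRetries(s, func(d *deployment) { d.Replicas = 0 }, 10*time.Millisecond, time.Second)
	fmt.Println(d, err) // {2 0} <nil>
}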
+*/ + +package kubectl + +import ( + "fmt" + "io" + "os" + + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/version" +) + +func GetServerVersion(w io.Writer, kubeClient client.Interface) { + serverVersion, err := kubeClient.Discovery().ServerVersion() + if err != nil { + fmt.Printf("Couldn't read server version from server: %v\n", err) + os.Exit(1) + } + + fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion) +} + +func GetClientVersion(w io.Writer) { + fmt.Fprintf(w, "Client Version: %#v\n", version.Get()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go b/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go new file mode 100644 index 00000000..d2920dd7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "os" + "os/signal" + + "k8s.io/kubernetes/pkg/watch" +) + +// WatchLoop loops, passing events in w to fn. +// If user sends interrupt signal, shut down cleanly. Otherwise, never return. +func WatchLoop(w watch.Interface, fn func(watch.Event) error) { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + defer signal.Stop(signals) + for { + select { + case event, ok := <-w.ResultChan(): + if !ok { + return + } + if err := fn(event); err != nil { + w.Stop() + } + case <-signals: + w.Stop() + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go new file mode 100644 index 00000000..04a25c90 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package qos contains helper functions for quality of service. +// For each resource (memory, CPU) Kubelet supports three classes of containers. +// Memory guaranteed containers will receive the highest priority and will get all the resources +// they need. +// Burstable containers will be guaranteed their request and can “burst” and use more resources +// when available. +// Best-Effort containers, which don’t specify a request, can use resources only if not being used +// by other pods. 
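Editor's note on WatchLoop above: it is a select over two channels, the watch results and an interrupt signal, stopping the watch on callback error or Ctrl-C. A self-contained sketch of that loop with a plain events channel in place of watch.Interface (the consume helper is illustrative, not the vendored function):

package main

import (
	"fmt"
	"os"
	"os/signal"
)

// consume mirrors WatchLoop: drain events, let the callback decide when to
// stop, and shut down cleanly on an interrupt signal.
func consume(events <-chan string, fn func(string) error) {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)
	for {
		select {
		case e, ok := <-events:
			if !ok {
				return // channel closed, nothing more to watch
			}
			if err := fn(e); err != nil {
				return
			}
		case <-signals:
			return
		}
	}
}

func main() {
	events := make(chan string, 2)
	events <- "ADDED"
	events <- "MODIFIED"
	close(events)
	consume(events, func(e string) error {
		fmt.Println("event:", e)
		return nil
	})
}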
+package qos diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go new file mode 100644 index 00000000..511e629f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package qos + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubelet/qos/util" +) + +const ( + PodInfraOOMAdj int = -999 + KubeletOOMScoreAdj int = -999 + KubeProxyOOMScoreAdj int = -999 + guaranteedOOMScoreAdj int = -998 + besteffortOOMScoreAdj int = 1000 +) + +// GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the +// container should be adjusted. +// The OOM score of a process is the percentage of memory it consumes +// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 +// and 1000. Containers with higher OOM scores are killed if the system runs out of memory. +// See https://lwn.net/Articles/391222/ for more information. +func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int { + switch util.GetPodQos(pod) { + case util.Guaranteed: + // Guaranteed containers should be the last to get killed. + return guaranteedOOMScoreAdj + case util.BestEffort: + return besteffortOOMScoreAdj + } + + // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally, + // we want to protect Burstable containers that consume less memory than requested. + // The formula below is a heuristic. A container requesting for 10% of a system's + // memory will have an OOM score adjust of 900. If a process in container Y + // uses over 10% of memory, its OOM score will be 1000. The idea is that containers + // which use more than their request will have an OOM score of 1000 and will be prime + // targets for OOM kills. + // Note that this is a heuristic, it won't work if a container has many small processes. + memoryRequest := container.Resources.Requests.Memory().Value() + oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity + // A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure + // that burstable pods have a higher OOM score adjustment. + if oomScoreAdjust < 2 { + return 2 + } + // Give burstable pods a higher chance of survival over besteffort pods. + if int(oomScoreAdjust) == besteffortOOMScoreAdj { + return int(oomScoreAdjust - 1) + } + return int(oomScoreAdjust) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go new file mode 100644 index 00000000..9d7a5786 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go @@ -0,0 +1,146 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
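Editor's note on the burstable branch of GetContainerOOMScoreAdjust above: it is pure integer arithmetic, a container requesting about a tenth of the node's memory lands near 900, and the result is clamped away from the guaranteed and best-effort endpoints. A small self-contained sketch of that calculation with made-up numbers (the helper name burstableOOMScoreAdjust is hypothetical, not the vendored function):

package main

import "fmt"

const besteffortOOMScoreAdj = 1000

// burstableOOMScoreAdjust mirrors the burstable-case arithmetic: larger
// requests relative to capacity give lower (safer) adjustments, clamped
// above the guaranteed range and below the best-effort value.
func burstableOOMScoreAdjust(memoryRequest, memoryCapacity int64) int {
	adj := 1000 - (1000*memoryRequest)/memoryCapacity
	if adj < 2 {
		return 2 // stay above any guaranteed pod that drifted to a score of 1
	}
	if int(adj) == besteffortOOMScoreAdj {
		return int(adj - 1) // stay below best-effort containers
	}
	return int(adj)
}

func main() {
	gib := int64(1 << 30)
	fmt.Println(burstableOOMScoreAdjust(gib/10, gib)) // ~10% of capacity -> 901 (integer division)
	fmt.Println(burstableOOMScoreAdjust(gib, gib))    // request == capacity -> clamped to 2
	fmt.Println(burstableOOMScoreAdjust(0, gib))      // zero request -> 999, just below best-effort
}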
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" +) + +const ( + Guaranteed = "Guaranteed" + Burstable = "Burstable" + BestEffort = "BestEffort" +) + +// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed. +func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { + // A container resource is guaranteed if its request == limit. + // If request == limit, the user is very confident of resource consumption. + req, hasReq := container.Resources.Requests[resource] + limit, hasLimit := container.Resources.Limits[resource] + if !hasReq || !hasLimit { + return false + } + return req.Cmp(limit) == 0 && req.Value() != 0 +} + +// isResourceBestEffort returns true if the container's resource requirements are best-effort. +func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool { + // A container resource is best-effort if its request is unspecified or 0. + // If a request is specified, then the user expects some kind of resource guarantee. + req, hasReq := container.Resources.Requests[resource] + return !hasReq || req.Value() == 0 +} + +// GetPodQos returns the QoS class of a pod. +// A pod is besteffort if none of its containers have specified any requests or limits. +// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. +// A pod is burstable if limits and requests do not match across all containers. +func GetPodQos(pod *api.Pod) string { + requests := api.ResourceList{} + limits := api.ResourceList{} + zeroQuantity := resource.MustParse("0") + isGuaranteed := true + for _, container := range pod.Spec.Containers { + // process requests + for name, quantity := range container.Resources.Requests { + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.Copy() + if _, exists := requests[name]; !exists { + requests[name] = *delta + } else { + delta.Add(requests[name]) + requests[name] = *delta + } + } + } + // process limits + for name, quantity := range container.Resources.Limits { + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.Copy() + if _, exists := limits[name]; !exists { + limits[name] = *delta + } else { + delta.Add(limits[name]) + limits[name] = *delta + } + } + } + if len(container.Resources.Limits) != len(supportedComputeResources) { + isGuaranteed = false + } + } + if len(requests) == 0 && len(limits) == 0 { + return BestEffort + } + // Check if requests match limits for all resources. + if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) && + len(limits) == len(supportedComputeResources) { + return Guaranteed + } + return Burstable +} + +// QoSList is a set of (resource name, QoS class) pairs.
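Editor's note on GetPodQos above: the classification falls out of whether requests and limits are set, cover both cpu and memory, and are equal. A short usage sketch, assuming this vendored tree is on the import path (the podWith helper is illustrative only): no requests or limits gives BestEffort, equal requests and limits for both compute resources gives Guaranteed, and anything in between is Burstable.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
)

// podWith wraps a single-container pod around the given resource requirements.
func podWith(res api.ResourceRequirements) *api.Pod {
	return &api.Pod{Spec: api.PodSpec{Containers: []api.Container{{Name: "c", Resources: res}}}}
}

func main() {
	// No requests or limits at all -> BestEffort.
	fmt.Println(qosutil.GetPodQos(podWith(api.ResourceRequirements{})))

	// Requests == limits for both cpu and memory -> Guaranteed.
	full := api.ResourceList{
		api.ResourceCPU:    resource.MustParse("100m"),
		api.ResourceMemory: resource.MustParse("128Mi"),
	}
	fmt.Println(qosutil.GetPodQos(podWith(api.ResourceRequirements{Requests: full, Limits: full})))

	// Requests set but no limits -> Burstable.
	fmt.Println(qosutil.GetPodQos(podWith(api.ResourceRequirements{Requests: full})))
}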
+type QoSList map[api.ResourceName]string + +// GetQoS returns a mapping of resource name to QoS class of a container +func GetQoS(container *api.Container) QoSList { + resourceToQoS := QoSList{} + for resource := range allResources(container) { + switch { + case isResourceGuaranteed(container, resource): + resourceToQoS[resource] = Guaranteed + case isResourceBestEffort(container, resource): + resourceToQoS[resource] = BestEffort + default: + resourceToQoS[resource] = Burstable + } + } + return resourceToQoS +} + +// supportedComputeResources is the list of supported compute resources +var supportedComputeResources = []api.ResourceName{ + api.ResourceCPU, + api.ResourceMemory, +} + +// allResources returns a set of all possible resources whose mapped key value is true if present on the container +func allResources(container *api.Container) map[api.ResourceName]bool { + resources := map[api.ResourceName]bool{} + for _, resource := range supportedComputeResources { + resources[resource] = false + } + for resource := range container.Resources.Requests { + resources[resource] = true + } + for resource := range container.Resources.Limits { + resources[resource] = true + } + return resources +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go new file mode 100644 index 00000000..dc6c989e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ports defines ports used by various pieces of the kubernetes +// infrastructure. +package ports diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go new file mode 100644 index 00000000..246a1a56 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go @@ -0,0 +1,40 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ports + +const ( + // ProxyPort is the default port for the proxy healthz server. + // May be overridden by a flag at startup. + ProxyStatusPort = 10249 + // KubeletPort is the default port for the kubelet server on each host machine. + // May be overridden by a flag at startup. + KubeletPort = 10250 + // SchedulerPort is the default port for the scheduler status server. + // May be overridden by a flag at startup. 
+ SchedulerPort = 10251 + // ControllerManagerPort is the default port for the controller manager status server. + // May be overridden by a flag at startup. + ControllerManagerPort = 10252 + // Port for flannel daemon. + FlannelDaemonPort = 10253 + // KubeletReadOnlyPort exposes basic read-only services from the kubelet. + // May be overridden by a flag at startup. + // This is necessary for heapster to collect monitoring stats from the kubelet + // until heapster can transition to using the SSL endpoint. + // TODO(roberthbailey): Remove this once we have a better solution for heapster. + KubeletReadOnlyPort = 10255 +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go new file mode 100644 index 00000000..2486e9b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package generic provides a generic object store interface and a +// generic label/field matching type. +package generic diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go new file mode 100644 index 00000000..08e2df7b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go @@ -0,0 +1,142 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" +) + +// AttrFunc returns label and field sets for List or Watch to compare against, or an error. +type AttrFunc func(obj runtime.Object) (label labels.Set, field fields.Set, err error) + +// ObjectMetaFieldsSet returns a fields set that represents the ObjectMeta. +func ObjectMetaFieldsSet(objectMeta api.ObjectMeta, hasNamespaceField bool) fields.Set { + if !hasNamespaceField { + return fields.Set{ + "metadata.name": objectMeta.Name, + } + } + return fields.Set{ + "metadata.name": objectMeta.Name, + "metadata.namespace": objectMeta.Namespace, + } +} + +// MergeFieldsSets merges a fields'set from fragment into the source. +func MergeFieldsSets(source fields.Set, fragment fields.Set) fields.Set { + for k, value := range fragment { + source[k] = value + } + return source +} + +// SelectionPredicate implements a generic predicate that can be passed to +// GenericRegistry's List or Watch methods. 
Implements the Matcher interface. +type SelectionPredicate struct { + Label labels.Selector + Field fields.Selector + GetAttrs AttrFunc +} + +// Matches returns true if the given object's labels and fields (as +// returned by s.GetAttrs) match s.Label and s.Field. An error is +// returned if s.GetAttrs fails. +func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { + if s.Label.Empty() && s.Field.Empty() { + return true, nil + } + labels, fields, err := s.GetAttrs(obj) + if err != nil { + return false, err + } + return s.Label.Matches(labels) && s.Field.Matches(fields), nil +} + +// MatchesSingle will return (name, true) if and only if s.Field matches on the object's +// name. +func (s *SelectionPredicate) MatchesSingle() (string, bool) { + // TODO: should be namespace.name + if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok { + return name, true + } + return "", false +} + +// Matcher can return true if an object matches the Matcher's selection +// criteria. If it is known that the matcher will match only a single object +// then MatchesSingle should return the key of that object and true. This is an +// optimization only--Matches() should continue to work. +type Matcher interface { + // Matches should return true if obj matches this matcher's requirements. + Matches(obj runtime.Object) (matchesThisObject bool, err error) + + // If this matcher matches a single object, return the key for that + // object and true here. This will greatly increase efficiency. You + // must still implement Matches(). Note that key does NOT need to + // include the object's namespace. + MatchesSingle() (key string, matchesSingleObject bool) + + // TODO: when we start indexing objects, add something like the below: + // MatchesIndices() (indexName []string, indexValue []string) + // where indexName/indexValue are the same length. +} + +// MatcherFunc makes a matcher from the provided function. For easy definition +// of matchers for testing. Note: use SelectionPredicate above for real code! +func MatcherFunc(f func(obj runtime.Object) (bool, error)) Matcher { + return matcherFunc(f) +} + +type matcherFunc func(obj runtime.Object) (bool, error) + +// Matches calls the embedded function. +func (m matcherFunc) Matches(obj runtime.Object) (bool, error) { + return m(obj) +} + +// MatchesSingle always returns "", false-- because this is a predicate +// implementation of Matcher. +func (m matcherFunc) MatchesSingle() (string, bool) { + return "", false +} + +// MatchOnKey returns a matcher that will send only the object matching key +// through the matching function f. For testing! +// Note: use SelectionPredicate above for real code! +func MatchOnKey(key string, f func(obj runtime.Object) (bool, error)) Matcher { + return matchKey{key, f} +} + +type matchKey struct { + key string + matcherFunc +} + +// MatchesSingle always returns its key, true. +func (m matchKey) MatchesSingle() (string, bool) { + return m.key, true +} + +var ( + // Assert implementations match the interface. + _ = Matcher(matchKey{}) + _ = Matcher(&SelectionPredicate{}) + _ = Matcher(matcherFunc(nil)) +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go new file mode 100644 index 00000000..eea52c99 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. 
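Editor's note on SelectionPredicate above: it is simply two selectors plus a function that extracts the attributes to match against. A self-contained sketch of the same shape, using plain maps instead of the labels/fields selector types (all names here are stand-ins, not the vendored API):

package main

import "fmt"

// attrFunc extracts the attribute set to match against, like AttrFunc does
// for labels and fields on a runtime.Object.
type attrFunc func(obj interface{}) map[string]string

// predicate pairs the required attributes with the extractor, mirroring
// SelectionPredicate's Label/Field + GetAttrs layout.
type predicate struct {
	required map[string]string
	getAttrs attrFunc
}

// Matches reports whether every required key/value is present on the object.
func (p predicate) Matches(obj interface{}) bool {
	attrs := p.getAttrs(obj)
	for k, v := range p.required {
		if attrs[k] != v {
			return false
		}
	}
	return true
}

type pod struct {
	Name   string
	Labels map[string]string
}

func main() {
	p := predicate{
		required: map[string]string{"app": "nginx"},
		getAttrs: func(obj interface{}) map[string]string { return obj.(pod).Labels },
	}
	fmt.Println(p.Matches(pod{Name: "web-1", Labels: map[string]string{"app": "nginx"}})) // true
	fmt.Println(p.Matches(pod{Name: "db-1", Labels: map[string]string{"app": "mysql"}}))  // false
}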
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + pkgstorage "k8s.io/kubernetes/pkg/storage" +) + +// RESTOptions is set of configuration options to generic registries. +type RESTOptions struct { + Storage pkgstorage.Interface + Decorator StorageDecorator + DeleteCollectionWorkers int +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go new file mode 100644 index 00000000..70109efe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go @@ -0,0 +1,44 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generic + +import ( + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/storage" +) + +// StorageDecorator is a function signature for producing +// a storage.Interface from given parameters. +type StorageDecorator func( + storageInterface storage.Interface, + capacity int, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) storage.Interface + +// Returns given 'storageInterface' without any decoration. +func UndecoratedStorage( + storageInterface storage.Interface, + capacity int, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) storage.Interface { + return storageInterface +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go new file mode 100644 index 00000000..892726ca --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go @@ -0,0 +1,601 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
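Editor's note on StorageDecorator above: it is a function type so registries can be handed either UndecoratedStorage (a pass-through) or a wrapper, without changing their own code. A self-contained sketch of that decorator-function shape with a toy storage interface (every type and function name here is hypothetical):

package main

import "fmt"

// storage is a toy stand-in for storage.Interface.
type storage interface {
	Get(key string) string
}

type etcdStorage struct{}

func (etcdStorage) Get(key string) string { return "value-for-" + key }

// decorator mirrors StorageDecorator: take a storage, return a (possibly
// wrapped) storage.
type decorator func(s storage) storage

// undecorated returns the storage unchanged, like UndecoratedStorage.
func undecorated(s storage) storage { return s }

// loggingStorage is one possible decoration: log every read, then delegate.
type loggingStorage struct{ inner storage }

func (l loggingStorage) Get(key string) string {
	fmt.Println("GET", key)
	return l.inner.Get(key)
}

func withLogging(s storage) storage { return loggingStorage{inner: s} }

func main() {
	var d decorator = withLogging
	s := d(undecorated(etcdStorage{}))
	fmt.Println(s.Get("pods/default/web-1"))
}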
+*/ + +package thirdpartyresourcedata + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/yaml" + "k8s.io/kubernetes/pkg/watch/versioned" +) + +type thirdPartyObjectConverter struct { + converter runtime.ObjectConvertor +} + +func (t *thirdPartyObjectConverter) ConvertToVersion(in runtime.Object, outVersion runtime.GroupVersioner) (out runtime.Object, err error) { + switch in.(type) { + // This seems weird, but in this case the ThirdPartyResourceData is really just a wrapper on the raw 3rd party data. + // The actual thing printed/sent to server is the actual raw third party resource data, which only has one version. + case *extensions.ThirdPartyResourceData: + return in, nil + default: + return t.converter.ConvertToVersion(in, outVersion) + } +} + +func (t *thirdPartyObjectConverter) Convert(in, out, context interface{}) error { + return t.converter.Convert(in, out, context) +} + +func (t *thirdPartyObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return t.converter.ConvertFieldLabel(version, kind, label, value) +} + +func NewThirdPartyObjectConverter(converter runtime.ObjectConvertor) runtime.ObjectConvertor { + return &thirdPartyObjectConverter{converter} +} + +type thirdPartyResourceDataMapper struct { + mapper meta.RESTMapper + kind string + version string + group string +} + +var _ meta.RESTMapper = &thirdPartyResourceDataMapper{} + +func (t *thirdPartyResourceDataMapper) getResource() unversioned.GroupVersionResource { + plural, _ := meta.KindToResource(t.getKind()) + + return plural +} + +func (t *thirdPartyResourceDataMapper) getKind() unversioned.GroupVersionKind { + return unversioned.GroupVersionKind{Group: t.group, Version: t.version, Kind: t.kind} +} + +func (t *thirdPartyResourceDataMapper) isThirdPartyResource(partialResource unversioned.GroupVersionResource) bool { + actualResource := t.getResource() + if strings.ToLower(partialResource.Resource) != strings.ToLower(actualResource.Resource) { + return false + } + if len(partialResource.Group) != 0 && partialResource.Group != actualResource.Group { + return false + } + if len(partialResource.Version) != 0 && partialResource.Version != actualResource.Version { + return false + } + + return true +} + +func (t *thirdPartyResourceDataMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { + if t.isThirdPartyResource(resource) { + return []unversioned.GroupVersionResource{t.getResource()}, nil + } + return t.mapper.ResourcesFor(resource) +} + +func (t *thirdPartyResourceDataMapper) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { + if t.isThirdPartyResource(resource) { + return []unversioned.GroupVersionKind{t.getKind()}, nil + } + return t.mapper.KindsFor(resource) +} + +func (t *thirdPartyResourceDataMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { + if t.isThirdPartyResource(resource) { + return t.getResource(), nil + } + return t.mapper.ResourceFor(resource) +} + +func (t 
*thirdPartyResourceDataMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { + if t.isThirdPartyResource(resource) { + return t.getKind(), nil + } + return t.mapper.KindFor(resource) +} + +func (t *thirdPartyResourceDataMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if len(versions) != 1 { + return nil, fmt.Errorf("unexpected set of versions: %v", versions) + } + if gk.Group != t.group { + return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) + } + if gk.Kind != "ThirdPartyResourceData" { + return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) + } + if versions[0] != t.version { + return nil, fmt.Errorf("unknown version %q expected %q", versions[0], t.version) + } + + // TODO figure out why we're doing this rewriting + extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} + + mapping, err := t.mapper.RESTMapping(extensionGK, registered.GroupOrDie(extensions.GroupName).GroupVersion.Version) + if err != nil { + return nil, err + } + mapping.ObjectConvertor = &thirdPartyObjectConverter{mapping.ObjectConvertor} + return mapping, nil +} + +func (t *thirdPartyResourceDataMapper) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { + if gk.Group != t.group { + return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) + } + if gk.Kind != "ThirdPartyResourceData" { + return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) + } + + // TODO figure out why we're doing this rewriting + extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} + + mappings, err := t.mapper.RESTMappings(extensionGK) + if err != nil { + return nil, err + } + for _, m := range mappings { + m.ObjectConvertor = &thirdPartyObjectConverter{m.ObjectConvertor} + } + return mappings, nil +} + +func (t *thirdPartyResourceDataMapper) AliasesForResource(resource string) ([]string, bool) { + return t.mapper.AliasesForResource(resource) +} + +func (t *thirdPartyResourceDataMapper) ResourceSingularizer(resource string) (singular string, err error) { + return t.mapper.ResourceSingularizer(resource) +} + +func NewMapper(mapper meta.RESTMapper, kind, version, group string) meta.RESTMapper { + return &thirdPartyResourceDataMapper{ + mapper: mapper, + kind: kind, + version: version, + group: group, + } +} + +type thirdPartyResourceDataCodecFactory struct { + delegate runtime.NegotiatedSerializer + kind string + encodeGV unversioned.GroupVersion + decodeGV unversioned.GroupVersion +} + +func NewNegotiatedSerializer(s runtime.NegotiatedSerializer, kind string, encodeGV, decodeGV unversioned.GroupVersion) runtime.NegotiatedSerializer { + return &thirdPartyResourceDataCodecFactory{ + delegate: s, + kind: kind, + encodeGV: encodeGV, + decodeGV: decodeGV, + } +} + +func (t *thirdPartyResourceDataCodecFactory) SupportedMediaTypes() []string { + supported := sets.NewString(t.delegate.SupportedMediaTypes()...) 
+ return supported.Intersection(sets.NewString("application/json", "application/yaml")).List() +} + +func (t *thirdPartyResourceDataCodecFactory) SerializerForMediaType(mediaType string, params map[string]string) (runtime.SerializerInfo, bool) { + switch mediaType { + case "application/json", "application/yaml": + return t.delegate.SerializerForMediaType(mediaType, params) + default: + return runtime.SerializerInfo{}, false + } +} + +func (t *thirdPartyResourceDataCodecFactory) SupportedStreamingMediaTypes() []string { + supported := sets.NewString(t.delegate.SupportedStreamingMediaTypes()...) + return supported.Intersection(sets.NewString("application/json", "application/json;stream=watch")).List() +} + +func (t *thirdPartyResourceDataCodecFactory) StreamingSerializerForMediaType(mediaType string, params map[string]string) (runtime.StreamSerializerInfo, bool) { + switch mediaType { + case "application/json", "application/json;stream=watch": + return t.delegate.StreamingSerializerForMediaType(mediaType, params) + default: + return runtime.StreamSerializerInfo{}, false + } +} + +func (t *thirdPartyResourceDataCodecFactory) EncoderForVersion(s runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + if target, ok := runtime.PreferredGroupVersion(gv); ok { + return &thirdPartyResourceDataEncoder{delegate: t.delegate.EncoderForVersion(s, gv), gvk: target.WithKind(t.kind)} + } + return &thirdPartyResourceDataEncoder{delegate: t.delegate.EncoderForVersion(s, gv)} +} + +func (t *thirdPartyResourceDataCodecFactory) DecoderToVersion(s runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return NewDecoder(t.delegate.DecoderToVersion(s, gv), t.kind) +} + +func NewCodec(delegate runtime.Codec, gvk unversioned.GroupVersionKind) runtime.Codec { + return runtime.NewCodec(NewEncoder(delegate, gvk), NewDecoder(delegate, gvk.Kind)) +} + +type thirdPartyResourceDataDecoder struct { + delegate runtime.Decoder + kind string +} + +func NewDecoder(delegate runtime.Decoder, kind string) runtime.Decoder { + return &thirdPartyResourceDataDecoder{delegate: delegate, kind: kind} +} + +var _ runtime.Decoder = &thirdPartyResourceDataDecoder{} + +func parseObject(data []byte) (map[string]interface{}, error) { + var obj interface{} + if err := json.Unmarshal(data, &obj); err != nil { + return nil, err + } + mapObj, ok := obj.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected object: %#v", obj) + } + return mapObj, nil +} + +func (t *thirdPartyResourceDataDecoder) populate(data []byte) (runtime.Object, error) { + mapObj, err := parseObject(data) + if err != nil { + return nil, err + } + return t.populateFromObject(mapObj, data) +} + +func (t *thirdPartyResourceDataDecoder) populateFromObject(mapObj map[string]interface{}, data []byte) (runtime.Object, error) { + typeMeta := unversioned.TypeMeta{} + if err := json.Unmarshal(data, &typeMeta); err != nil { + return nil, err + } + isList := strings.HasSuffix(typeMeta.Kind, "List") + switch { + case !isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind): + result := &extensions.ThirdPartyResourceData{} + if err := t.populateResource(result, mapObj, data); err != nil { + return nil, err + } + return result, nil + case isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind+"List"): + list := &extensions.ThirdPartyResourceDataList{} + if err := t.populateListResource(list, mapObj); err != nil { + return nil, err + } + return list, nil + default: + return nil, fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind) + } +} + 
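
As a rough illustration of the decode strategy used above (the raw third-party JSON is kept verbatim in ThirdPartyResourceData.Data, and only kind, apiVersion and the metadata block are lifted out), here is a minimal standalone sketch. The local type names, helper function and sample payload are illustrative assumptions only and are not part of the vendored package:

// Minimal sketch of the third-party decode strategy, using only the standard
// library. Type names and payload are illustrative, not the vendored types.
package main

import (
	"encoding/json"
	"fmt"
)

// localTypeMeta mirrors the "kind"/"apiVersion" fields the decoder inspects first.
type localTypeMeta struct {
	Kind       string `json:"kind"`
	APIVersion string `json:"apiVersion"`
}

// localThirdPartyData mirrors the idea of ThirdPartyResourceData: a little typed
// metadata plus the untouched raw document.
type localThirdPartyData struct {
	Name string
	Data []byte
}

func decodeThirdParty(raw []byte, expectedKind string) (*localThirdPartyData, error) {
	// First read only the type information, as populateFromObject does.
	var tm localTypeMeta
	if err := json.Unmarshal(raw, &tm); err != nil {
		return nil, err
	}
	if expectedKind != "" && tm.Kind != expectedKind {
		return nil, fmt.Errorf("unexpected kind: %s, expected %s", tm.Kind, expectedKind)
	}
	// Then pull the metadata block out of the generic map, as populateResource does.
	var obj map[string]interface{}
	if err := json.Unmarshal(raw, &obj); err != nil {
		return nil, err
	}
	meta, ok := obj["metadata"].(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected object for metadata: %#v", obj["metadata"])
	}
	name, _ := meta["name"].(string)
	// Keep the full raw document alongside the extracted metadata.
	return &localThirdPartyData{Name: name, Data: raw}, nil
}

func main() {
	raw := []byte(`{"kind":"Foo","apiVersion":"example.com/v1","metadata":{"name":"bar"},"spec":{"replicas":3}}`)
	out, err := decodeThirdParty(raw, "Foo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("name=%s rawBytes=%d\n", out.Name, len(out.Data))
}

Running the sketch prints the extracted name and the size of the retained raw bytes, which loosely mirrors how the vendored encodeToJSON later re-emits the stored Data with the typed ObjectMeta spliced back in.
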
+func (t *thirdPartyResourceDataDecoder) populateResource(objIn *extensions.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { + metadata, ok := mapObj["metadata"].(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"]) + } + + metadataData, err := json.Marshal(metadata) + if err != nil { + return err + } + + if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil { + return err + } + // Override API Version with the ThirdPartyResourceData value + // TODO: fix this hard code + objIn.APIVersion = v1beta1.SchemeGroupVersion.String() + + objIn.Data = data + return nil +} + +func IsThirdPartyObject(rawData []byte, gvk *unversioned.GroupVersionKind) (isThirdParty bool, gvkOut *unversioned.GroupVersionKind, err error) { + var gv unversioned.GroupVersion + if gvk == nil { + data, err := yaml.ToJSON(rawData) + if err != nil { + return false, nil, err + } + metadata := unversioned.TypeMeta{} + if err = json.Unmarshal(data, &metadata); err != nil { + return false, nil, err + } + gv, err = unversioned.ParseGroupVersion(metadata.APIVersion) + if err != nil { + return false, nil, err + } + gvkOut = &unversioned.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: metadata.Kind, + } + } else { + gv = gvk.GroupVersion() + gvkOut = gvk + } + return registered.IsThirdPartyAPIGroupVersion(gv), gvkOut, nil +} + +func (t *thirdPartyResourceDataDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { + if into == nil { + if gvk == nil || gvk.Kind != t.kind { + if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { + return nil, nil, err + } else if !isThirdParty { + return t.delegate.Decode(data, gvk, into) + } + } + obj, err := t.populate(data) + if err != nil { + return nil, nil, err + } + return obj, gvk, nil + } + switch o := into.(type) { + case *extensions.ThirdPartyResourceData: + break + case *runtime.VersionedObjects: + // We're not sure that it's third party, we need to test + if gvk == nil || gvk.Kind != t.kind { + if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { + return nil, nil, err + } else if !isThirdParty { + return t.delegate.Decode(data, gvk, into) + } + } + obj, err := t.populate(data) + if err != nil { + return nil, nil, err + } + o.Objects = []runtime.Object{ + obj, + } + return o, gvk, nil + default: + return t.delegate.Decode(data, gvk, into) + } + + thirdParty := into.(*extensions.ThirdPartyResourceData) + var dataObj interface{} + if err := json.Unmarshal(data, &dataObj); err != nil { + return nil, nil, err + } + mapObj, ok := dataObj.(map[string]interface{}) + if !ok { + + return nil, nil, fmt.Errorf("unexpected object: %#v", dataObj) + } + /*if gvk.Kind != "ThirdPartyResourceData" { + return nil, nil, fmt.Errorf("unexpected kind: %s", gvk.Kind) + }*/ + actual := &unversioned.GroupVersionKind{} + if kindObj, found := mapObj["kind"]; !found { + if gvk == nil { + return nil, nil, runtime.NewMissingKindErr(string(data)) + } + mapObj["kind"] = gvk.Kind + actual.Kind = gvk.Kind + } else { + kindStr, ok := kindObj.(string) + if !ok { + return nil, nil, fmt.Errorf("unexpected object for 'kind': %v", kindObj) + } + if len(t.kind) > 0 && kindStr != t.kind { + return nil, nil, fmt.Errorf("kind doesn't match, expecting: %s, got %s", gvk.Kind, kindStr) + } + actual.Kind = kindStr + } + if versionObj, found := mapObj["apiVersion"]; !found { + if gvk == nil { + 
return nil, nil, runtime.NewMissingVersionErr(string(data)) + } + mapObj["apiVersion"] = gvk.GroupVersion().String() + actual.Group, actual.Version = gvk.Group, gvk.Version + } else { + versionStr, ok := versionObj.(string) + if !ok { + return nil, nil, fmt.Errorf("unexpected object for 'apiVersion': %v", versionObj) + } + if gvk != nil && versionStr != gvk.GroupVersion().String() { + return nil, nil, fmt.Errorf("version doesn't match, expecting: %v, got %s", gvk.GroupVersion(), versionStr) + } + gv, err := unversioned.ParseGroupVersion(versionStr) + if err != nil { + return nil, nil, err + } + actual.Group, actual.Version = gv.Group, gv.Version + } + + mapObj, err := parseObject(data) + if err != nil { + return nil, actual, err + } + if err := t.populateResource(thirdParty, mapObj, data); err != nil { + return nil, actual, err + } + return thirdParty, actual, nil +} + +func (t *thirdPartyResourceDataDecoder) populateListResource(objIn *extensions.ThirdPartyResourceDataList, mapObj map[string]interface{}) error { + items, ok := mapObj["items"].([]interface{}) + if !ok { + return fmt.Errorf("unexpected object for items: %#v", mapObj["items"]) + } + objIn.Items = make([]extensions.ThirdPartyResourceData, len(items)) + for ix := range items { + objData, err := json.Marshal(items[ix]) + if err != nil { + return err + } + objMap, err := parseObject(objData) + if err != nil { + return err + } + if err := t.populateResource(&objIn.Items[ix], objMap, objData); err != nil { + return err + } + } + return nil +} + +const template = `{ + "kind": "%s", + "apiVersion": "%s", + "metadata": {}, + "items": [ %s ] +}` + +type thirdPartyResourceDataEncoder struct { + delegate runtime.Encoder + gvk unversioned.GroupVersionKind +} + +func NewEncoder(delegate runtime.Encoder, gvk unversioned.GroupVersionKind) runtime.Encoder { + return &thirdPartyResourceDataEncoder{delegate: delegate, gvk: gvk} +} + +var _ runtime.Encoder = &thirdPartyResourceDataEncoder{} + +func encodeToJSON(obj *extensions.ThirdPartyResourceData, stream io.Writer) error { + var objOut interface{} + if err := json.Unmarshal(obj.Data, &objOut); err != nil { + return err + } + objMap, ok := objOut.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type: %v", objOut) + } + objMap["metadata"] = obj.ObjectMeta + encoder := json.NewEncoder(stream) + return encoder.Encode(objMap) +} + +func (t *thirdPartyResourceDataEncoder) Encode(obj runtime.Object, stream io.Writer) (err error) { + switch obj := obj.(type) { + case *extensions.ThirdPartyResourceData: + return encodeToJSON(obj, stream) + case *extensions.ThirdPartyResourceDataList: + // TODO: There must be a better way to do this... 
+ dataStrings := make([]string, len(obj.Items)) + for ix := range obj.Items { + buff := &bytes.Buffer{} + err := encodeToJSON(&obj.Items[ix], buff) + if err != nil { + return err + } + dataStrings[ix] = buff.String() + } + if t.gvk.IsEmpty() { + return fmt.Errorf("thirdPartyResourceDataEncoder was not given a target version") + } + gv := t.gvk.GroupVersion() + _, err = fmt.Fprintf(stream, template, t.gvk.Kind+"List", gv.String(), strings.Join(dataStrings, ",")) + return err + case *versioned.InternalEvent: + event := &versioned.Event{} + err := versioned.Convert_versioned_InternalEvent_to_versioned_Event(obj, event, nil) + if err != nil { + return err + } + + enc := json.NewEncoder(stream) + err = enc.Encode(event) + if err != nil { + return err + } + + return nil + case *unversioned.Status, *unversioned.APIResourceList: + return t.delegate.Encode(obj, stream) + default: + return fmt.Errorf("unexpected object to encode: %#v", obj) + } +} + +func NewObjectCreator(group, version string, delegate runtime.ObjectCreater) runtime.ObjectCreater { + return &thirdPartyResourceDataCreator{group, version, delegate} +} + +type thirdPartyResourceDataCreator struct { + group string + version string + delegate runtime.ObjectCreater +} + +func (t *thirdPartyResourceDataCreator) New(kind unversioned.GroupVersionKind) (out runtime.Object, err error) { + switch kind.Kind { + case "ThirdPartyResourceData": + if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { + return nil, fmt.Errorf("unknown kind %v", kind) + } + return &extensions.ThirdPartyResourceData{}, nil + case "ThirdPartyResourceDataList": + if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { + return nil, fmt.Errorf("unknown kind %v", kind) + } + return &extensions.ThirdPartyResourceDataList{}, nil + // TODO: this list needs to be formalized higher in the chain + case "ListOptions", "WatchEvent": + if apiutil.GetGroupVersion(t.group, t.version) == kind.GroupVersion().String() { + // Translate third party group to external group. + gvk := registered.EnabledVersionsForGroup(api.GroupName)[0].WithKind(kind.Kind) + return t.delegate.New(gvk) + } + return t.delegate.New(kind) + default: + return t.delegate.New(kind) + } +} + +func NewThirdPartyParameterCodec(p runtime.ParameterCodec) runtime.ParameterCodec { + return &thirdPartyParameterCodec{p} +} + +type thirdPartyParameterCodec struct { + delegate runtime.ParameterCodec +} + +func (t *thirdPartyParameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { + return t.delegate.DecodeParameters(parameters, v1.SchemeGroupVersion, into) +} + +func (t *thirdPartyParameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { + return t.delegate.EncodeParameters(obj, v1.SchemeGroupVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go new file mode 100644 index 00000000..62e2dc1e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package thirdpartyresourcedata provides Registry interface and its REST +// implementation for storing ThirdPartyResourceData api objects. +package thirdpartyresourcedata diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go new file mode 100644 index 00000000..058276d1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/watch" +) + +// Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. +type Registry interface { + ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) + WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) + GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) + CreateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) + UpdateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) + DeleteThirdPartyResourceData(ctx api.Context, name string) error +} + +// storage puts strong typing around storage calls +type storage struct { + rest.StandardStorage +} + +// NewRegistry returns a new Registry interface for the given Storage. Any mismatched +// types will panic. 
+func NewRegistry(s rest.StandardStorage) Registry { + return &storage{s} +} + +func (s *storage) ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) { + obj, err := s.List(ctx, options) + if err != nil { + return nil, err + } + return obj.(*extensions.ThirdPartyResourceDataList), nil +} + +func (s *storage) WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) { + return s.Watch(ctx, options) +} + +func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) { + obj, err := s.Get(ctx, name) + if err != nil { + return nil, err + } + return obj.(*extensions.ThirdPartyResourceData), nil +} + +func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { + obj, err := s.Create(ctx, ThirdPartyResourceData) + return obj.(*extensions.ThirdPartyResourceData), err +} + +func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { + obj, _, err := s.Update(ctx, ThirdPartyResourceData.Name, rest.DefaultUpdatedObjectInfo(ThirdPartyResourceData, api.Scheme)) + return obj.(*extensions.ThirdPartyResourceData), err +} + +func (s *storage) DeleteThirdPartyResourceData(ctx api.Context, name string) error { + _, err := s.Delete(ctx, name, nil) + return err +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go new file mode 100644 index 00000000..9f7673d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go @@ -0,0 +1,92 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package thirdpartyresourcedata + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/registry/generic" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// strategy implements behavior for ThirdPartyResource objects +type strategy struct { + runtime.ObjectTyper + api.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating ThirdPartyResource +// objects via the REST API. 
+var Strategy = strategy{api.Scheme, api.SimpleNameGenerator} + +var _ = rest.RESTCreateStrategy(Strategy) + +var _ = rest.RESTUpdateStrategy(Strategy) + +func (strategy) NamespaceScoped() bool { + return true +} + +func (strategy) PrepareForCreate(obj runtime.Object) { +} + +func (strategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { + return validation.ValidateThirdPartyResourceData(obj.(*extensions.ThirdPartyResourceData)) +} + +// Canonicalize normalizes the object after validation. +func (strategy) Canonicalize(obj runtime.Object) { +} + +func (strategy) AllowCreateOnUpdate() bool { + return false +} + +func (strategy) PrepareForUpdate(obj, old runtime.Object) { +} + +func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { + return validation.ValidateThirdPartyResourceDataUpdate(obj.(*extensions.ThirdPartyResourceData), old.(*extensions.ThirdPartyResourceData)) +} + +func (strategy) AllowUnconditionalUpdate() bool { + return true +} + +// Matcher returns a generic matcher for a given label and field selector. +func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { + return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { + sa, ok := obj.(*extensions.ThirdPartyResourceData) + if !ok { + return false, fmt.Errorf("not a ThirdPartyResourceData") + } + fields := SelectableFields(sa) + return label.Matches(labels.Set(sa.Labels)) && field.Matches(fields), nil + }) +} + +// SelectableFields returns a label set that can be used for filter selection +func SelectableFields(obj *extensions.ThirdPartyResourceData) labels.Set { + return labels.Set{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go new file mode 100644 index 00000000..120981e8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go @@ -0,0 +1,68 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package thirdpartyresourcedata + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func ExtractGroupVersionKind(list *extensions.ThirdPartyResourceList) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) { + gvs := []unversioned.GroupVersion{} + gvks := []unversioned.GroupVersionKind{} + for ix := range list.Items { + rsrc := &list.Items[ix] + kind, group, err := ExtractApiGroupAndKind(rsrc) + if err != nil { + return nil, nil, err + } + for _, version := range rsrc.Versions { + gv := unversioned.GroupVersion{Group: group, Version: version.Name} + gvs = append(gvs, gv) + gvks = append(gvks, unversioned.GroupVersionKind{Group: group, Version: version.Name, Kind: kind}) + } + } + return gvs, gvks, nil +} + +func convertToCamelCase(input string) string { + result := "" + toUpper := true + for ix := range input { + char := input[ix] + if toUpper { + result = result + string([]byte{(char - 32)}) + toUpper = false + } else if char == '-' { + toUpper = true + } else { + result = result + string([]byte{char}) + } + } + return result +} + +func ExtractApiGroupAndKind(rsrc *extensions.ThirdPartyResource) (kind string, group string, err error) { + parts := strings.Split(rsrc.Name, ".") + if len(parts) < 3 { + return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least ..", rsrc.Name) + } + return convertToCamelCase(parts[0]), strings.Join(parts[1:], "."), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go new file mode 100644 index 00000000..097b1a6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go @@ -0,0 +1,145 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/util/sets" +) + +const ( + ValidatedPSPAnnotation = "kubernetes.io/psp" +) + +func GetAllFSTypesExcept(exceptions ...string) sets.String { + fstypes := GetAllFSTypesAsSet() + for _, e := range exceptions { + fstypes.Delete(e) + } + return fstypes +} + +func GetAllFSTypesAsSet() sets.String { + fstypes := sets.NewString() + fstypes.Insert( + string(extensions.HostPath), + string(extensions.AzureFile), + string(extensions.Flocker), + string(extensions.FlexVolume), + string(extensions.EmptyDir), + string(extensions.GCEPersistentDisk), + string(extensions.AWSElasticBlockStore), + string(extensions.GitRepo), + string(extensions.Secret), + string(extensions.NFS), + string(extensions.ISCSI), + string(extensions.Glusterfs), + string(extensions.PersistentVolumeClaim), + string(extensions.RBD), + string(extensions.Cinder), + string(extensions.CephFS), + string(extensions.DownwardAPI), + string(extensions.FC), + string(extensions.ConfigMap), + string(extensions.VsphereVolume)) + return fstypes +} + +// getVolumeFSType gets the FSType for a volume. +func GetVolumeFSType(v api.Volume) (extensions.FSType, error) { + switch { + case v.HostPath != nil: + return extensions.HostPath, nil + case v.EmptyDir != nil: + return extensions.EmptyDir, nil + case v.GCEPersistentDisk != nil: + return extensions.GCEPersistentDisk, nil + case v.AWSElasticBlockStore != nil: + return extensions.AWSElasticBlockStore, nil + case v.GitRepo != nil: + return extensions.GitRepo, nil + case v.Secret != nil: + return extensions.Secret, nil + case v.NFS != nil: + return extensions.NFS, nil + case v.ISCSI != nil: + return extensions.ISCSI, nil + case v.Glusterfs != nil: + return extensions.Glusterfs, nil + case v.PersistentVolumeClaim != nil: + return extensions.PersistentVolumeClaim, nil + case v.RBD != nil: + return extensions.RBD, nil + case v.FlexVolume != nil: + return extensions.FlexVolume, nil + case v.Cinder != nil: + return extensions.Cinder, nil + case v.CephFS != nil: + return extensions.CephFS, nil + case v.Flocker != nil: + return extensions.Flocker, nil + case v.DownwardAPI != nil: + return extensions.DownwardAPI, nil + case v.FC != nil: + return extensions.FC, nil + case v.AzureFile != nil: + return extensions.AzureFile, nil + case v.ConfigMap != nil: + return extensions.ConfigMap, nil + case v.VsphereVolume != nil: + return extensions.VsphereVolume, nil + } + + return "", fmt.Errorf("unknown volume type for volume: %#v", v) +} + +// fsTypeToStringSet converts an FSType slice to a string set. +func FSTypeToStringSet(fsTypes []extensions.FSType) sets.String { + set := sets.NewString() + for _, v := range fsTypes { + set.Insert(string(v)) + } + return set +} + +// PSPAllowsAllVolumes checks for FSTypeAll in the psp's allowed volumes. +func PSPAllowsAllVolumes(psp *extensions.PodSecurityPolicy) bool { + return PSPAllowsFSType(psp, extensions.All) +} + +// PSPAllowsFSType is a utility for checking if a PSP allows a particular FSType. +// If all volumes are allowed then this will return true for any FSType passed. +func PSPAllowsFSType(psp *extensions.PodSecurityPolicy, fsType extensions.FSType) bool { + if psp == nil { + return false + } + + for _, v := range psp.Spec.Volumes { + if v == fsType || v == extensions.All { + return true + } + } + return false +} + +// FallsInRange is a utility to determine it the id falls in the valid range. 
+func FallsInRange(id int64, rng extensions.IDRange) bool { + return id >= rng.Min && id <= rng.Max +} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS b/vendor/k8s.io/kubernetes/pkg/storage/OWNERS new file mode 100644 index 00000000..a57ded7f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/OWNERS @@ -0,0 +1,6 @@ +assignees: + - lavalamp + - liggitt + - timothysc + - wojtek-t + - xiang90 diff --git a/vendor/k8s.io/kubernetes/pkg/storage/cacher.go b/vendor/k8s.io/kubernetes/pkg/storage/cacher.go new file mode 100644 index 00000000..e7b4d63e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/cacher.go @@ -0,0 +1,629 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/rest" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/kubernetes/pkg/watch" + + "github.com/golang/glog" + "golang.org/x/net/context" +) + +// CacherConfig contains the configuration for a given Cache. +type CacherConfig struct { + // Maximum size of the history cached in memory. + CacheCapacity int + + // An underlying storage.Interface. + Storage Interface + + // An underlying storage.Versioner. + Versioner Versioner + + // The Cache will be caching objects of a given Type and assumes that they + // are all stored under ResourcePrefix directory in the underlying database. + Type interface{} + ResourcePrefix string + + // KeyFunc is used to get a key in the underyling storage for a given object. + KeyFunc func(runtime.Object) (string, error) + + // NewList is a function that creates new empty object storing a list of + // objects of type Type. + NewListFunc func() runtime.Object +} + +// Cacher is responsible for serving WATCH and LIST requests for a given +// resource from its internal cache and updating its cache in the background +// based on the underlying storage contents. +// Cacher implements storage.Interface (although most of the calls are just +// delegated to the underlying storage). +type Cacher struct { + sync.RWMutex + + // Each user-facing method that is not simply redirected to the underlying + // storage has to read-lock on this mutex before starting any processing. + // This is necessary to prevent users from accessing structures that are + // uninitialized or are being repopulated right now. + // NOTE: We cannot easily reuse the main mutex for it due to multi-threaded + // interactions of Cacher with the underlying WatchCache. 
Since Cacher is + // caling WatchCache directly and WatchCache is calling Cacher methods + // via its OnEvent and OnReplace hooks, we explicitly assume that if mutexes + // of both structures are held, the one from WatchCache is acquired first + // to avoid deadlocks. Unfortunately, forcing this rule in startCaching + // would be very difficult and introducing one more mutex seems to be much + // easier. + usable sync.RWMutex + + // Underlying storage.Interface. + storage Interface + + // "sliding window" of recent changes of objects and the current state. + watchCache *watchCache + reflector *cache.Reflector + + // Registered watchers. + watcherIdx int + watchers map[int]*cacheWatcher + + // Versioner is used to handle resource versions. + versioner Versioner + + // keyFunc is used to get a key in the underyling storage for a given object. + keyFunc func(runtime.Object) (string, error) + + // Handling graceful termination. + stopLock sync.RWMutex + stopped bool + stopCh chan struct{} + stopWg sync.WaitGroup +} + +// Create a new Cacher responsible from service WATCH and LIST requests from its +// internal cache and updating its cache in the background based on the given +// configuration. +func NewCacher( + storage Interface, + capacity int, + versioner Versioner, + objectType runtime.Object, + resourcePrefix string, + scopeStrategy rest.NamespaceScopedStrategy, + newListFunc func() runtime.Object) Interface { + config := CacherConfig{ + CacheCapacity: capacity, + Storage: storage, + Versioner: versioner, + Type: objectType, + ResourcePrefix: resourcePrefix, + NewListFunc: newListFunc, + } + if scopeStrategy.NamespaceScoped() { + config.KeyFunc = func(obj runtime.Object) (string, error) { + return NamespaceKeyFunc(resourcePrefix, obj) + } + } else { + config.KeyFunc = func(obj runtime.Object) (string, error) { + return NoNamespaceKeyFunc(resourcePrefix, obj) + } + } + return NewCacherFromConfig(config) +} + +// Create a new Cacher responsible from service WATCH and LIST requests from its +// internal cache and updating its cache in the background based on the given +// configuration. +func NewCacherFromConfig(config CacherConfig) *Cacher { + watchCache := newWatchCache(config.CacheCapacity) + listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + + // Give this error when it is constructed rather than when you get the + // first watch item, because it's much easier to track down that way. + if obj, ok := config.Type.(runtime.Object); ok { + if err := runtime.CheckCodec(config.Storage.Codec(), obj); err != nil { + panic("storage codec doesn't seem to match given type: " + err.Error()) + } + } + + cacher := &Cacher{ + usable: sync.RWMutex{}, + storage: config.Storage, + watchCache: watchCache, + reflector: cache.NewReflector(listerWatcher, config.Type, watchCache, 0), + watcherIdx: 0, + watchers: make(map[int]*cacheWatcher), + versioner: config.Versioner, + keyFunc: config.KeyFunc, + stopped: false, + // We need to (potentially) stop both: + // - wait.Until go-routine + // - reflector.ListAndWatch + // and there are no guarantees on the order that they will stop. + // So we will be simply closing the channel, and synchronizing on the WaitGroup. + stopCh: make(chan struct{}), + stopWg: sync.WaitGroup{}, + } + // See startCaching method for explanation and where this is unlocked. 
+ cacher.usable.Lock() + watchCache.SetOnEvent(cacher.processEvent) + + stopCh := cacher.stopCh + cacher.stopWg.Add(1) + go func() { + defer cacher.stopWg.Done() + wait.Until( + func() { + if !cacher.isStopped() { + cacher.startCaching(stopCh) + } + }, time.Second, stopCh, + ) + }() + return cacher +} + +func (c *Cacher) startCaching(stopChannel <-chan struct{}) { + // The 'usable' lock is always 'RLock'able when it is safe to use the cache. + // It is safe to use the cache after a successful list until a disconnection. + // We start with usable (write) locked. The below OnReplace function will + // unlock it after a successful list. The below defer will then re-lock + // it when this function exits (always due to disconnection), only if + // we actually got a successful list. This cycle will repeat as needed. + successfulList := false + c.watchCache.SetOnReplace(func() { + successfulList = true + c.usable.Unlock() + }) + defer func() { + if successfulList { + c.usable.Lock() + } + }() + + c.terminateAllWatchers() + // Note that since onReplace may be not called due to errors, we explicitly + // need to retry it on errors under lock. + // Also note that startCaching is called in a loop, so there's no need + // to have another loop here. + if err := c.reflector.ListAndWatch(stopChannel); err != nil { + glog.Errorf("unexpected ListAndWatch error: %v", err) + } +} + +// Implements storage.Interface. +func (c *Cacher) Backends(ctx context.Context) []string { + return c.storage.Backends(ctx) +} + +// Implements storage.Interface. +func (c *Cacher) Versioner() Versioner { + return c.storage.Versioner() +} + +// Implements storage.Interface. +func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { + return c.storage.Create(ctx, key, obj, out, ttl) +} + +// Implements storage.Interface. +func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error { + return c.storage.Delete(ctx, key, out, preconditions) +} + +// Implements storage.Interface. +func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { + watchRV, err := ParseWatchResourceVersion(resourceVersion) + if err != nil { + return nil, err + } + + // Do NOT allow Watch to start when the underlying structures are not propagated. + c.usable.RLock() + defer c.usable.RUnlock() + + // We explicitly use thread unsafe version and do locking ourself to ensure that + // no new events will be processed in the meantime. The watchCache will be unlocked + // on return from this function. + // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the + // underlying watchCache is calling processEvent under its lock. + c.watchCache.RLock() + defer c.watchCache.RUnlock() + initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. + return newErrWatcher(err), nil + } + + c.Lock() + defer c.Unlock() + watcher := newCacheWatcher(watchRV, initEvents, filterFunction(key, c.keyFunc, filter), forgetWatcher(c, c.watcherIdx)) + c.watchers[c.watcherIdx] = watcher + c.watcherIdx++ + return watcher, nil +} + +// Implements storage.Interface. 
+func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { + return c.Watch(ctx, key, resourceVersion, filter) +} + +// Implements storage.Interface. +func (c *Cacher) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error { + return c.storage.Get(ctx, key, objPtr, ignoreNotFound) +} + +// Implements storage.Interface. +func (c *Cacher) GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error { + return c.storage.GetToList(ctx, key, filter, listObj) +} + +// Implements storage.Interface. +func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error { + if resourceVersion == "" { + // If resourceVersion is not specified, serve it from underlying + // storage (for backward compatibility). + return c.storage.List(ctx, key, resourceVersion, filter, listObj) + } + + // If resourceVersion is specified, serve it from cache. + // It's guaranteed that the returned value is at least that + // fresh as the given resourceVersion. + + listRV, err := ParseListResourceVersion(resourceVersion) + if err != nil { + return err + } + + // To avoid situation when List is processed before the underlying + // watchCache is propagated for the first time, we acquire and immediately + // release the 'usable' lock. + // We don't need to hold it all the time, because watchCache is thread-safe + // and it would complicate already very difficult locking pattern. + c.usable.RLock() + c.usable.RUnlock() + + // List elements from cache, with at least 'listRV'. + listPtr, err := meta.GetItemsPtr(listObj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil || listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + filterFunc := filterFunction(key, c.keyFunc, filter) + + objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV) + if err != nil { + return fmt.Errorf("failed to wait for fresh list: %v", err) + } + for _, obj := range objs { + object, ok := obj.(runtime.Object) + if !ok { + return fmt.Errorf("non runtime.Object returned from storage: %v", obj) + } + if filterFunc(object) { + listVal.Set(reflect.Append(listVal, reflect.ValueOf(object).Elem())) + } + } + if c.versioner != nil { + if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil { + return err + } + } + return nil +} + +// Implements storage.Interface. +func (c *Cacher) GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *Preconditions, tryUpdate UpdateFunc) error { + return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate) +} + +// Implements storage.Interface. 
+func (c *Cacher) Codec() runtime.Codec { + return c.storage.Codec() +} + +func (c *Cacher) processEvent(event watchCacheEvent) { + c.Lock() + defer c.Unlock() + for _, watcher := range c.watchers { + watcher.add(event) + } +} + +func (c *Cacher) terminateAllWatchers() { + c.Lock() + defer c.Unlock() + for key, watcher := range c.watchers { + delete(c.watchers, key) + watcher.stop() + } +} + +func (c *Cacher) isStopped() bool { + c.stopLock.RLock() + defer c.stopLock.RUnlock() + return c.stopped +} + +func (c *Cacher) Stop() { + c.stopLock.Lock() + c.stopped = true + c.stopLock.Unlock() + close(c.stopCh) + c.stopWg.Wait() +} + +func forgetWatcher(c *Cacher, index int) func(bool) { + return func(lock bool) { + if lock { + c.Lock() + defer c.Unlock() + } + // It's possible that the watcher is already not in the map (e.g. in case of + // simulaneous Stop() and terminateAllWatchers(), but it doesn't break anything. + delete(c.watchers, index) + } +} + +func filterFunction(key string, keyFunc func(runtime.Object) (string, error), filter FilterFunc) FilterFunc { + return func(obj runtime.Object) bool { + objKey, err := keyFunc(obj) + if err != nil { + glog.Errorf("invalid object for filter: %v", obj) + return false + } + if !strings.HasPrefix(objKey, key) { + return false + } + return filter(obj) + } +} + +// Returns resource version to which the underlying cache is synced. +func (c *Cacher) LastSyncResourceVersion() (uint64, error) { + // To avoid situation when LastSyncResourceVersion is processed before the + // underlying watchCache is propagated, we acquire 'usable' lock. + c.usable.RLock() + defer c.usable.RUnlock() + + c.RLock() + defer c.RUnlock() + + resourceVersion := c.reflector.LastSyncResourceVersion() + if resourceVersion == "" { + return 0, nil + } + return strconv.ParseUint(resourceVersion, 10, 64) +} + +// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. +type cacherListerWatcher struct { + storage Interface + resourcePrefix string + newListFunc func() runtime.Object +} + +func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { + return &cacherListerWatcher{ + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + } +} + +// Implements cache.ListerWatcher interface. +func (lw *cacherListerWatcher) List(options api.ListOptions) (runtime.Object, error) { + list := lw.newListFunc() + if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil { + return nil, err + } + return list, nil +} + +// Implements cache.ListerWatcher interface. 
+func (lw *cacherListerWatcher) Watch(options api.ListOptions) (watch.Interface, error) { + return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything) +} + +// cacherWatch implements watch.Interface to return a single error +type errWatcher struct { + result chan watch.Event +} + +func newErrWatcher(err error) *errWatcher { + // Create an error event + errEvent := watch.Event{Type: watch.Error} + switch err := err.(type) { + case runtime.Object: + errEvent.Object = err + case *errors.StatusError: + errEvent.Object = &err.ErrStatus + default: + errEvent.Object = &unversioned.Status{ + Status: unversioned.StatusFailure, + Message: err.Error(), + Reason: unversioned.StatusReasonInternalError, + Code: http.StatusInternalServerError, + } + } + + // Create a watcher with room for a single event, populate it, and close the channel + watcher := &errWatcher{result: make(chan watch.Event, 1)} + watcher.result <- errEvent + close(watcher.result) + + return watcher +} + +// Implements watch.Interface. +func (c *errWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. +func (c *errWatcher) Stop() { + // no-op +} + +// cacherWatch implements watch.Interface +type cacheWatcher struct { + sync.Mutex + input chan watchCacheEvent + result chan watch.Event + filter FilterFunc + stopped bool + forget func(bool) +} + +func newCacheWatcher(resourceVersion uint64, initEvents []watchCacheEvent, filter FilterFunc, forget func(bool)) *cacheWatcher { + watcher := &cacheWatcher{ + input: make(chan watchCacheEvent, 10), + result: make(chan watch.Event, 10), + filter: filter, + stopped: false, + forget: forget, + } + go watcher.process(initEvents, resourceVersion) + return watcher +} + +// Implements watch.Interface. +func (c *cacheWatcher) ResultChan() <-chan watch.Event { + return c.result +} + +// Implements watch.Interface. +func (c *cacheWatcher) Stop() { + c.forget(true) + c.stop() +} + +func (c *cacheWatcher) stop() { + c.Lock() + defer c.Unlock() + if !c.stopped { + c.stopped = true + close(c.input) + } +} + +var timerPool sync.Pool + +func (c *cacheWatcher) add(event watchCacheEvent) { + // Try to send the event immediately, without blocking. + select { + case c.input <- event: + return + default: + } + + // OK, block sending, but only for up to 5 seconds. + // cacheWatcher.add is called very often, so arrange + // to reuse timers instead of constantly allocating. + const timeout = 5 * time.Second + t, ok := timerPool.Get().(*time.Timer) + if ok { + t.Reset(timeout) + } else { + t = time.NewTimer(timeout) + } + defer timerPool.Put(t) + + select { + case c.input <- event: + stopped := t.Stop() + if !stopped { + // Consume triggered (but not yet received) timer event + // so that future reuse does not get a spurious timeout. + <-t.C + } + case <-t.C: + // This means that we couldn't send event to that watcher. + // Since we don't want to block on it infinitely, + // we simply terminate it. + c.forget(false) + c.stop() + } +} + +func (c *cacheWatcher) sendWatchCacheEvent(event watchCacheEvent) { + curObjPasses := event.Type != watch.Deleted && c.filter(event.Object) + oldObjPasses := false + if event.PrevObject != nil { + oldObjPasses = c.filter(event.PrevObject) + } + if !curObjPasses && !oldObjPasses { + // Watcher is not interested in that object. 
+ return + } + + object, err := api.Scheme.Copy(event.Object) + if err != nil { + glog.Errorf("unexpected copy error: %v", err) + return + } + switch { + case curObjPasses && !oldObjPasses: + c.result <- watch.Event{Type: watch.Added, Object: object} + case curObjPasses && oldObjPasses: + c.result <- watch.Event{Type: watch.Modified, Object: object} + case !curObjPasses && oldObjPasses: + c.result <- watch.Event{Type: watch.Deleted, Object: object} + } +} + +func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) { + defer utilruntime.HandleCrash() + + for _, event := range initEvents { + c.sendWatchCacheEvent(event) + } + defer close(c.result) + defer c.Stop() + for { + event, ok := <-c.input + if !ok { + return + } + // only send events newer than resourceVersion + if event.ResourceVersion > resourceVersion { + c.sendWatchCacheEvent(event) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/doc.go new file mode 100644 index 00000000..dca0d5b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Interfaces for database-related operations. +package storage diff --git a/vendor/k8s.io/kubernetes/pkg/storage/errors.go b/vendor/k8s.io/kubernetes/pkg/storage/errors.go new file mode 100644 index 00000000..61b3cba5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/errors.go @@ -0,0 +1,174 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/util/validation/field" +) + +const ( + ErrCodeKeyNotFound int = iota + 1 + ErrCodeKeyExists + ErrCodeResourceVersionConflicts + ErrCodeInvalidObj + ErrCodeUnreachable +) + +var errCodeToMessage = map[int]string{ + ErrCodeKeyNotFound: "key not found", + ErrCodeKeyExists: "key exists", + ErrCodeResourceVersionConflicts: "resource version conflicts", + ErrCodeInvalidObj: "invalid object", + ErrCodeUnreachable: "server unreachable", +} + +func NewKeyNotFoundError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyNotFound, + Key: key, + ResourceVersion: rv, + } +} + +func NewKeyExistsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeKeyExists, + Key: key, + ResourceVersion: rv, + } +} + +func NewResourceVersionConflictsError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeResourceVersionConflicts, + Key: key, + ResourceVersion: rv, + } +} + +func NewUnreachableError(key string, rv int64) *StorageError { + return &StorageError{ + Code: ErrCodeUnreachable, + Key: key, + ResourceVersion: rv, + } +} + +func NewInvalidObjError(key, msg string) *StorageError { + return &StorageError{ + Code: ErrCodeInvalidObj, + Key: key, + AdditionalErrorMsg: msg, + } +} + +type StorageError struct { + Code int + Key string + ResourceVersion int64 + AdditionalErrorMsg string +} + +func (e *StorageError) Error() string { + return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", + errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) +} + +// IsNotFound returns true if and only if err is "key" not found error. +func IsNotFound(err error) bool { + return isErrCode(err, ErrCodeKeyNotFound) +} + +// IsNodeExist returns true if and only if err is an node already exist error. +func IsNodeExist(err error) bool { + return isErrCode(err, ErrCodeKeyExists) +} + +// IsUnreachable returns true if and only if err indicates the server could not be reached. +func IsUnreachable(err error) bool { + return isErrCode(err, ErrCodeUnreachable) +} + +// IsTestFailed returns true if and only if err is a write conflict. +func IsTestFailed(err error) bool { + return isErrCode(err, ErrCodeResourceVersionConflicts, ErrCodeInvalidObj) +} + +// IsInvalidUID returns true if and only if err is invalid UID error +func IsInvalidObj(err error) bool { + return isErrCode(err, ErrCodeInvalidObj) +} + +func isErrCode(err error, codes ...int) bool { + if err == nil { + return false + } + if e, ok := err.(*StorageError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// InvalidError is generated when an error caused by invalid API object occurs +// in the storage package. +type InvalidError struct { + Errs field.ErrorList +} + +func (e InvalidError) Error() string { + return e.Errs.ToAggregate().Error() +} + +// IsInvalidError returns true if and only if err is an InvalidError. +func IsInvalidError(err error) bool { + _, ok := err.(InvalidError) + return ok +} + +func NewInvalidError(errors field.ErrorList) InvalidError { + return InvalidError{errors} +} + +// InternalError is generated when an error occurs in the storage package, i.e., +// not from the underlying storage backend (e.g., etcd). 
+type InternalError struct { + Reason string +} + +func (e InternalError) Error() string { + return e.Reason +} + +// IsInternalError returns true if and only if err is an InternalError. +func IsInternalError(err error) bool { + _, ok := err.(InternalError) + return ok +} + +func NewInternalError(reason string) InternalError { + return InternalError{reason} +} + +func NewInternalErrorf(format string, a ...interface{}) InternalError { + return InternalError{fmt.Sprintf(format, a)} +} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go b/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go new file mode 100644 index 00000000..89290e29 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go @@ -0,0 +1,171 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "golang.org/x/net/context" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/watch" +) + +// Versioner abstracts setting and retrieving metadata fields from database response +// onto the object ot list. +type Versioner interface { + // UpdateObject sets storage metadata into an API object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateObject(obj runtime.Object, resourceVersion uint64) error + // UpdateList sets the resource version into an API list object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateList(obj runtime.Object, resourceVersion uint64) error + // ObjectResourceVersion returns the resource version (for persistence) of the specified object. + // Should return an error if the specified object does not have a persistable version. + ObjectResourceVersion(obj runtime.Object) (uint64, error) +} + +// ResponseMeta contains information about the database metadata that is associated with +// an object. It abstracts the actual underlying objects to prevent coupling with concrete +// database and to improve testability. +type ResponseMeta struct { + // TTL is the time to live of the node that contained the returned object. It may be + // zero or negative in some cases (objects may be expired after the requested + // expiration time due to server lag). + TTL int64 + // The resource version of the node that contained the returned object. + ResourceVersion uint64 +} + +// FilterFunc is a predicate which takes an API object and returns true +// if and only if the object should remain in the set. +type FilterFunc func(obj runtime.Object) bool + +// Everything is a FilterFunc which accepts all objects. +func Everything(runtime.Object) bool { + return true +} + +// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update +// that is guaranteed to succeed. +// See the comment for GuaranteedUpdate for more details. 
+type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + UID *types.UID `json:"uid,omitempty"` +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// Interface offers a common interface for object marshaling/unmarshling operations and +// hides all the storage-related operations behind it. +type Interface interface { + // Returns list of servers addresses of the underyling database. + // TODO: This method is used only in a single place. Consider refactoring and getting rid + // of this method from the interface. + Backends(ctx context.Context) []string + + // Returns Versioner associated with this interface. + Versioner() Versioner + + // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live + // in seconds (0 means forever). If no error is returned and out is not nil, out will be + // set to the read value from database. + Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error + + // Delete removes the specified key and returns the value that existed at that spot. + // If key didn't exist, it will return NotFound storage error. + Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error + + // Watch begins watching the specified key. Events are decoded into API objects, + // and any items passing 'filter' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) + + // WatchList begins watching the specified key's items. Items are decoded into API + // objects and any item passing 'filter' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching, + // which should be the current resourceVersion, and no longer rv+1 + // (e.g. reconnecting without missing any updates). + WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) + + // Get unmarshals json found at key into objPtr. On a not found error, will either + // return a zero object of the requested type, or an error, depending on ignoreNotFound. + // Treats empty responses and nil response nodes exactly like a not found error. + Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error + + // GetToList unmarshals json found at key and opaque it into *List api object + // (an object that satisfies the runtime.IsList definition). + GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error + + // List unmarshalls jsons found at directory defined by key and opaque them + // into *List api object (an object that satisfies runtime.IsList definition). + // The returned contents may be delayed, but it is guaranteed that they will + // be have at least 'resourceVersion'. 
+ List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error + + // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') + // retrying the update until success if there is index conflict. + // Note that object passed to tryUpdate may change across invocations of tryUpdate() if + // other writers are simultaneously updating it, so tryUpdate() needs to take into account + // the current contents of the object when deciding how the update object should look. + // If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false + // or zero value in 'ptrToType' parameter otherwise. + // If the object to update has the same value as previous, it won't do any update + // but will return the object in 'ptrToType' parameter. + // + // Example: + // + // s := /* implementation of Interface */ + // err := s.GuaranteedUpdate( + // "myKey", &MyType{}, true, + // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + // // Before each incovation of the user defined function, "input" is reset to + // // current contents for "myKey" in database. + // curr := input.(*MyType) // Guaranteed to succeed. + // + // // Make the modification + // curr.Counter++ + // + // // Return the modified object - return an error to stop iterating. Return + // // a uint64 to alter the TTL on the object, or nil to keep it the same value. + // return cur, nil, nil + // } + // }) + GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, precondtions *Preconditions, tryUpdate UpdateFunc) error + + // Codec provides access to the underlying codec being used by the implementation. + Codec() runtime.Codec +} + +// Config interface allows storage tiers to generate the proper storage.interface +// and reduce the dependencies to encapsulate storage. +type Config interface { + // Creates the Interface base on ConfigObject + NewStorage() (Interface, error) + + // This function is used to enforce membership, and return the underlying type + GetType() string +} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/util.go b/vendor/k8s.io/kubernetes/pkg/storage/util.go new file mode 100644 index 00000000..6595a676 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/util.go @@ -0,0 +1,90 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + "strconv" + + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/validation/field" +) + +type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc +func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} + +// ParseWatchResourceVersion takes a resource version argument and converts it to +// the etcd version we should pass to helper.Watch(). Because resourceVersion is +// an opaque value, the default watch behavior for non-zero watch is to watch +// the next value (if you pass "1", you will see updates from "2" onwards). +func ParseWatchResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" || resourceVersion == "0" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + return 0, NewInvalidError(field.ErrorList{ + // Validation errors are supposed to return version-specific field + // paths, but this is probably close enough. + field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), + }) + } + return version, nil +} + +// ParseListResourceVersion takes a resource version argument and converts it to +// the etcd version. +func ParseListResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + return version, err +} + +func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + meta.GetNamespace() + "/" + name, nil +} + +func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return "", err + } + name := meta.GetName() + if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", fmt.Errorf("invalid name: %v", msgs) + } + return prefix + "/" + name, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go b/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go new file mode 100644 index 00000000..3e5ce5d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go @@ -0,0 +1,331 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "fmt" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/client/cache" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/watch" +) + +const ( + // MaximumListWait determines how long we're willing to wait for a + // list if a client specified a resource version in the future. + MaximumListWait = 60 * time.Second +) + +// watchCacheEvent is a single "watch event" that is send to users of +// watchCache. Additionally to a typical "watch.Event" it contains +// the previous value of the object to enable proper filtering in the +// upper layers. +type watchCacheEvent struct { + Type watch.EventType + Object runtime.Object + PrevObject runtime.Object + ResourceVersion uint64 +} + +// watchCacheElement is a single "watch event" stored in a cache. +// It contains the resource version of the object and the object +// itself. +type watchCacheElement struct { + resourceVersion uint64 + watchCacheEvent watchCacheEvent +} + +// watchCache implements a Store interface. +// However, it depends on the elements implementing runtime.Object interface. +// +// watchCache is a "sliding window" (with a limited capacity) of objects +// observed from a watch. +type watchCache struct { + sync.RWMutex + + // Condition on which lists are waiting for the fresh enough + // resource version. + cond *sync.Cond + + // Maximum size of history window. + capacity int + + // cache is used a cyclic buffer - its first element (with the smallest + // resourceVersion) is defined by startIndex, its last element is defined + // by endIndex (if cache is full it will be startIndex + capacity). + // Both startIndex and endIndex can be greater than buffer capacity - + // you should always apply modulo capacity to get an index in cache array. + cache []watchCacheElement + startIndex int + endIndex int + + // store will effectively support LIST operation from the "end of cache + // history" i.e. from the moment just after the newest cached watched event. + // It is necessary to effectively allow clients to start watching at now. + store cache.Store + + // ResourceVersion up to which the watchCache is propagated. + resourceVersion uint64 + + // This handler is run at the end of every successful Replace() method. + onReplace func() + + // This handler is run at the end of every Add/Update/Delete method + // and additionally gets the previous value of the object. + onEvent func(watchCacheEvent) + + // for testing timeouts. 
+ clock util.Clock +} + +func newWatchCache(capacity int) *watchCache { + wc := &watchCache{ + capacity: capacity, + cache: make([]watchCacheElement, capacity), + startIndex: 0, + endIndex: 0, + store: cache.NewStore(cache.MetaNamespaceKeyFunc), + resourceVersion: 0, + clock: util.RealClock{}, + } + wc.cond = sync.NewCond(wc.RLocker()) + return wc +} + +func (w *watchCache) Add(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Added, Object: object} + + f := func(obj runtime.Object) error { return w.store.Add(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) Update(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Modified, Object: object} + + f := func(obj runtime.Object) error { return w.store.Update(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func (w *watchCache) Delete(obj interface{}) error { + object, resourceVersion, err := objectToVersionedRuntimeObject(obj) + if err != nil { + return err + } + event := watch.Event{Type: watch.Deleted, Object: object} + + f := func(obj runtime.Object) error { return w.store.Delete(obj) } + return w.processEvent(event, resourceVersion, f) +} + +func objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { + object, ok := obj.(runtime.Object) + if !ok { + return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) + } + meta, err := meta.Accessor(object) + if err != nil { + return nil, 0, err + } + resourceVersion, err := parseResourceVersion(meta.GetResourceVersion()) + if err != nil { + return nil, 0, err + } + return object, resourceVersion, nil +} + +func parseResourceVersion(resourceVersion string) (uint64, error) { + if resourceVersion == "" { + return 0, nil + } + return strconv.ParseUint(resourceVersion, 10, 64) +} + +func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(runtime.Object) error) error { + w.Lock() + defer w.Unlock() + previous, exists, err := w.store.Get(event.Object) + if err != nil { + return err + } + var prevObject runtime.Object + if exists { + prevObject = previous.(runtime.Object) + } + watchCacheEvent := watchCacheEvent{event.Type, event.Object, prevObject, resourceVersion} + if w.onEvent != nil { + w.onEvent(watchCacheEvent) + } + w.updateCache(resourceVersion, watchCacheEvent) + w.resourceVersion = resourceVersion + w.cond.Broadcast() + return updateFunc(event.Object) +} + +// Assumes that lock is already held for write. +func (w *watchCache) updateCache(resourceVersion uint64, event watchCacheEvent) { + if w.endIndex == w.startIndex+w.capacity { + // Cache is full - remove the oldest element. + w.startIndex++ + } + w.cache[w.endIndex%w.capacity] = watchCacheElement{resourceVersion, event} + w.endIndex++ +} + +func (w *watchCache) List() []interface{} { + w.RLock() + defer w.RUnlock() + return w.store.List() +} + +func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64) ([]interface{}, uint64, error) { + startTime := w.clock.Now() + go func() { + // Wake us up when the time limit has expired. The docs + // promise that time.After (well, NewTimer, which it calls) + // will wait *at least* the duration given. 
Since this go + // routine starts sometime after we record the start time, and + // it will wake up the loop below sometime after the broadcast, + // we don't need to worry about waking it up before the time + // has expired accidentally. + <-w.clock.After(MaximumListWait) + w.cond.Broadcast() + }() + + w.RLock() + defer w.RUnlock() + for w.resourceVersion < resourceVersion { + if w.clock.Since(startTime) >= MaximumListWait { + return nil, 0, fmt.Errorf("time limit exceeded while waiting for resource version %v (current value: %v)", resourceVersion, w.resourceVersion) + } + w.cond.Wait() + } + return w.store.List(), w.resourceVersion, nil +} + +func (w *watchCache) ListKeys() []string { + w.RLock() + defer w.RUnlock() + return w.store.ListKeys() +} + +func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { + w.RLock() + defer w.RUnlock() + return w.store.Get(obj) +} + +func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { + w.RLock() + defer w.RUnlock() + return w.store.GetByKey(key) +} + +func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { + version, err := parseResourceVersion(resourceVersion) + if err != nil { + return err + } + + w.Lock() + defer w.Unlock() + + w.startIndex = 0 + w.endIndex = 0 + if err := w.store.Replace(objs, resourceVersion); err != nil { + return err + } + w.resourceVersion = version + if w.onReplace != nil { + w.onReplace() + } + w.cond.Broadcast() + return nil +} + +func (w *watchCache) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *watchCache) SetOnEvent(onEvent func(watchCacheEvent)) { + w.Lock() + defer w.Unlock() + w.onEvent = onEvent +} + +func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]watchCacheEvent, error) { + size := w.endIndex - w.startIndex + oldest := w.resourceVersion + if size > 0 { + oldest = w.cache[w.startIndex%w.capacity].resourceVersion + } + if resourceVersion == 0 { + // resourceVersion = 0 means that we don't require any specific starting point + // and we would like to start watching from ~now. + // However, to keep backward compatibility, we additionally need to return the + // current state and only then start watching from that point. + // + // TODO: In v2 api, we should stop returning the current state - #13969. + allItems := w.store.List() + result := make([]watchCacheEvent, len(allItems)) + for i, item := range allItems { + result[i] = watchCacheEvent{Type: watch.Added, Object: item.(runtime.Object)} + } + return result, nil + } + if resourceVersion < oldest-1 { + return nil, errors.NewGone(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) + } + + // Binary search the smallest index at which resourceVersion is greater than the given one. 
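+	// Worked example (illustrative numbers): with capacity 3, startIndex 5 and
+	// endIndex 8, size is 3 and the cached events live at cache[5%3], cache[6%3]
+	// and cache[7%3] in order of increasing resourceVersion, so sort.Search over
+	// [0, size) below finds the first event newer than the requested version.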
+ f := func(i int) bool { + return w.cache[(w.startIndex+i)%w.capacity].resourceVersion > resourceVersion + } + first := sort.Search(size, f) + result := make([]watchCacheEvent, size-first) + for i := 0; i < size-first; i++ { + result[i] = w.cache[(w.startIndex+first+i)%w.capacity].watchCacheEvent + } + return result, nil +} + +func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]watchCacheEvent, error) { + w.RLock() + defer w.RUnlock() + return w.GetAllEventsSinceThreadUnsafe(resourceVersion) +} + +func (w *watchCache) Resync() error { + // Nothing to do + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go b/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go new file mode 100644 index 00000000..f4366436 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go @@ -0,0 +1,190 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crypto + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "math/big" + "net" + "os" + "path/filepath" + "time" +) + +// ShouldGenSelfSignedCerts returns false if the certificate or key files already exists, +// otherwise returns true. +func ShouldGenSelfSignedCerts(certPath, keyPath string) bool { + if canReadFile(certPath) || canReadFile(keyPath) { + return false + } + + return true +} + +// If the file represented by path exists and +// readable, returns true otherwise returns false. +func canReadFile(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + + defer f.Close() + + return true +} + +// GenerateSelfSignedCert creates a self-signed certificate and key for the given host. +// Host may be an IP or a DNS name +// You may also specify additional subject alt names (either ip or dns names) for the certificate +// The certificate will be created with file mode 0644. The key will be created with file mode 0600. +// If the certificate or key files already exist, they will be overwritten. +// Any parent directories of the certPath or keyPath will be created as needed with file mode 0755. 
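+// For example (assumed host and paths, illustrative only):
+//
+//	err := GenerateSelfSignedCert("localhost", "/tmp/serving.crt", "/tmp/serving.key",
+//		[]net.IP{net.ParseIP("127.0.0.1")}, []string{"example.internal"})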
+func GenerateSelfSignedCert(host, certPath, keyPath string, alternateIPs []net.IP, alternateDNS []string) error { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 365), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } + + template.IPAddresses = append(template.IPAddresses, alternateIPs...) + template.DNSNames = append(template.DNSNames, alternateDNS...) + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return err + } + + // Generate cert + certBuffer := bytes.Buffer{} + if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return err + } + + // Generate key + keyBuffer := bytes.Buffer{} + if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return err + } + + // Write cert + if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(certPath, certBuffer.Bytes(), os.FileMode(0644)); err != nil { + return err + } + + // Write key + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(keyPath, keyBuffer.Bytes(), os.FileMode(0600)); err != nil { + return err + } + + return nil +} + +// CertPoolFromFile returns an x509.CertPool containing the certificates in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func CertPoolFromFile(filename string) (*x509.CertPool, error) { + certs, err := certificatesFromFile(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// certificatesFromFile returns the x509.Certificates contained in the given PEM-encoded file. 
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func certificatesFromFile(file string) ([]*x509.Certificate, error) { + if len(file) == 0 { + return nil, errors.New("error reading certificates from an empty filename") + } + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + certs, err := CertsFromPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", file, err) + } + return certs, nil +} + +// CertsFromPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("could not read any certificates") + } + return certs, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go b/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go new file mode 100644 index 00000000..0442da42 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go @@ -0,0 +1,478 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "fmt" + "strconv" + "time" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/integer" + intstrutil "k8s.io/kubernetes/pkg/util/intstr" + labelsutil "k8s.io/kubernetes/pkg/util/labels" + podutil "k8s.io/kubernetes/pkg/util/pod" + rsutil "k8s.io/kubernetes/pkg/util/replicaset" + "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + // The revision annotation of a deployment's replica sets which records its rollout sequence + RevisionAnnotation = "deployment.kubernetes.io/revision" + + // Here are the possible rollback event reasons + RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" + RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" + RollbackDone = "DeploymentRollback" +) + +// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. 
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. +// The third returned value is the new replica set, and it may be nil if it doesn't exist yet. +func GetAllReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { + rsList, err := listReplicaSets(deployment, c) + if err != nil { + return nil, nil, nil, err + } + podList, err := listPods(deployment, c) + if err != nil { + return nil, nil, nil, err + } + oldRSes, allOldRSes, err := FindOldReplicaSets(deployment, rsList, podList) + if err != nil { + return nil, nil, nil, err + } + newRS, err := FindNewReplicaSet(deployment, rsList) + if err != nil { + return nil, nil, nil, err + } + return oldRSes, allOldRSes, newRS, nil +} + +// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. +// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. +func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { + rsList, err := listReplicaSets(deployment, c) + if err != nil { + return nil, nil, err + } + podList, err := listPods(deployment, c) + if err != nil { + return nil, nil, err + } + return FindOldReplicaSets(deployment, rsList, podList) +} + +// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. +// Returns nil if the new replica set doesn't exist yet. +func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { + rsList, err := listReplicaSets(deployment, c) + if err != nil { + return nil, err + } + return FindNewReplicaSet(deployment, rsList) +} + +// listReplicaSets lists all RSes the given deployment targets with the given client interface. +func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]extensions.ReplicaSet, error) { + return ListReplicaSets(deployment, + func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { + rsList, err := c.Extensions().ReplicaSets(namespace).List(options) + return rsList.Items, err + }) +} + +// listReplicaSets lists all Pods the given deployment targets with the given client interface. +func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) { + return ListPods(deployment, + func(namespace string, options api.ListOptions) (*api.PodList, error) { + return c.Core().Pods(namespace).List(options) + }) +} + +// TODO: switch this to full namespacers +type rsListFunc func(string, api.ListOptions) ([]extensions.ReplicaSet, error) +type podListFunc func(string, api.ListOptions) (*api.PodList, error) + +// ListReplicaSets returns a slice of RSes the given deployment targets. +func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]extensions.ReplicaSet, error) { + // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. 
the replica set's selector + // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830; + // or use controllerRef, see https://github.com/kubernetes/kubernetes/issues/2210 + namespace := deployment.Namespace + selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := api.ListOptions{LabelSelector: selector} + return getRSList(namespace, options) +} + +// ListPods returns a list of pods the given deployment targets. +func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) { + namespace := deployment.Namespace + selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + options := api.ListOptions{LabelSelector: selector} + return getPodList(namespace, options) +} + +// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] +// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes +// (e.g. the addition of a new field will cause the hash code to change) +// Note that we assume input podTemplateSpecs contain non-empty labels +func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) { + // The podTemplateSpec must have a non-empty label so that label selectors can find them. + // This is checked by validation (of resources contain a podTemplateSpec). + if len(template1.Labels) == 0 || len(template2.Labels) == 0 { + return false, fmt.Errorf("Unexpected empty labels found in given template") + } + hash1 := template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] + hash2 := template2.Labels[extensions.DefaultDeploymentUniqueLabelKey] + // compare equality ignoring pod-template-hash + template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] = hash2 + result := api.Semantic.DeepEqual(template1, template2) + template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] = hash1 + return result, nil +} + +// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). +func FindNewReplicaSet(deployment *extensions.Deployment, rsList []extensions.ReplicaSet) (*extensions.ReplicaSet, error) { + newRSTemplate := GetNewReplicaSetTemplate(deployment) + for i := range rsList { + equal, err := equalIgnoreHash(rsList[i].Spec.Template, newRSTemplate) + if err != nil { + return nil, err + } + if equal { + // This is the new ReplicaSet. + return &rsList[i], nil + } + } + // new ReplicaSet does not exist. + return nil, nil +} + +// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes. +// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. +func FindOldReplicaSets(deployment *extensions.Deployment, rsList []extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { + // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList. 
+ // All pods and replica sets are labeled with pod-template-hash to prevent overlapping + oldRSs := map[string]extensions.ReplicaSet{} + allOldRSs := map[string]extensions.ReplicaSet{} + newRSTemplate := GetNewReplicaSetTemplate(deployment) + for _, pod := range podList.Items { + podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) + for _, rs := range rsList { + rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, nil, fmt.Errorf("invalid label selector: %v", err) + } + // Filter out replica set that has the same pod template spec as the deployment - that is the new replica set. + equal, err := equalIgnoreHash(rs.Spec.Template, newRSTemplate) + if err != nil { + return nil, nil, err + } + if equal { + continue + } + allOldRSs[rs.ObjectMeta.Name] = rs + if rsLabelsSelector.Matches(podLabelsSelector) { + oldRSs[rs.ObjectMeta.Name] = rs + } + } + } + requiredRSs := []*extensions.ReplicaSet{} + for key := range oldRSs { + value := oldRSs[key] + requiredRSs = append(requiredRSs, &value) + } + allRSs := []*extensions.ReplicaSet{} + for key := range allOldRSs { + value := allOldRSs[key] + allRSs = append(allRSs, &value) + } + return requiredRSs, allRSs, nil +} + +func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { + return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { + rs, err := c.Extensions().ReplicaSets(namespace).Get(name) + if err != nil { + return false, err + } + return rs.Status.ObservedGeneration >= desiredGeneration, nil + }) +} + +func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { + return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { + rs, err := c.Extensions().ReplicaSets(namespace).Get(name) + if err != nil { + return false, err + } + return rs.Status.ObservedGeneration >= desiredGeneration && + rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil + }) +} + +// LabelPodsWithHash labels all pods in the given podList with the new hash label. +// The returned bool value can be used to tell if all pods are actually labeled. +func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) { + allPodsLabeled := true + for _, pod := range podList.Items { + // Only label the pod that doesn't already have the new hash + if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { + if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod, + func(podToUpdate *api.Pod) error { + // Precondition: the pod doesn't contain the new hash in its label. + if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { + return errors.ErrPreconditionViolated + } + podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) + return nil + }); err != nil { + return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err) + } else if podUpdated { + glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash) + } else { + // If the pod wasn't updated but didn't return error when we try to update it, we've hit "pod not found" or "precondition violated" error. 
+ // Then we can't say all pods are labeled + allPodsLabeled = false + } + } + } + return allPodsLabeled, nil +} + +// Returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet. +func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { + // newRS will have the same template as in deployment spec, plus a unique label in some cases. + newRSTemplate := api.PodTemplateSpec{ + ObjectMeta: deployment.Spec.Template.ObjectMeta, + Spec: deployment.Spec.Template.Spec, + } + newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( + deployment.Spec.Template.ObjectMeta.Labels, + extensions.DefaultDeploymentUniqueLabelKey, + podutil.GetPodTemplateSpecHash(newRSTemplate)) + return newRSTemplate +} + +// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. +func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { + deployment.Spec.Template.ObjectMeta = template.ObjectMeta + deployment.Spec.Template.Spec = template.Spec + deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( + deployment.Spec.Template.ObjectMeta.Labels, + extensions.DefaultDeploymentUniqueLabelKey) + return deployment +} + +// Returns the sum of Replicas of the given replica sets. +func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) + for _, rs := range replicaSets { + if rs != nil { + totalReplicaCount += rs.Spec.Replicas + } + } + return totalReplicaCount +} + +// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. +func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { + totalReplicaCount := int32(0) + for _, rs := range replicaSets { + if rs != nil { + totalReplicaCount += rs.Status.Replicas + } + } + return totalReplicaCount +} + +// GetAvailablePodsForReplicaSets returns the number of available pods (listed from clientset) corresponding to the given replica sets. +func GetAvailablePodsForReplicaSets(c clientset.Interface, deployment *extensions.Deployment, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { + podList, err := listPods(deployment, c) + if err != nil { + return 0, err + } + return CountAvailablePodsForReplicaSets(podList, rss, minReadySeconds) +} + +// CountAvailablePodsForReplicaSets returns the number of available pods corresponding to the given pod list and replica sets. +// Note that the input pod list should be the pods targeted by the deployment of input replica sets. +func CountAvailablePodsForReplicaSets(podList *api.PodList, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { + rsPods, err := filterPodsMatchingReplicaSets(rss, podList) + if err != nil { + return 0, err + } + return countAvailablePods(rsPods, minReadySeconds), nil +} + +// GetAvailablePodsForDeployment returns the number of available pods (listed from clientset) corresponding to the given deployment. 
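+// A pod counts as available when it is active and its Ready condition has held
+// for at least minReadySeconds (see IsPodAvailable below); for example, a call
+// like GetAvailablePodsForDeployment(c, d, 30) would only count pods that have
+// been Ready for 30 seconds or more (illustrative values).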
+func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) (int32, error) { + podList, err := listPods(deployment, c) + if err != nil { + return 0, err + } + return countAvailablePods(podList.Items, minReadySeconds), nil +} + +func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 { + availablePodCount := int32(0) + for _, pod := range pods { + if IsPodAvailable(&pod, minReadySeconds) { + availablePodCount++ + } + } + return availablePodCount +} + +func IsPodAvailable(pod *api.Pod, minReadySeconds int32) bool { + if !controller.IsPodActive(*pod) { + return false + } + // Check if we've passed minReadySeconds since LastTransitionTime + // If so, this pod is ready + for _, c := range pod.Status.Conditions { + // we only care about pod ready conditions + if c.Type == api.PodReady && c.Status == api.ConditionTrue { + // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available): + // 1. minReadySeconds == 0, or + // 2. LastTransitionTime (is set) + minReadySeconds (>0) < current time + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) { + return true + } + } + } + return false +} + +// filterPodsMatchingReplicaSets filters the given pod list and only return the ones targeted by the input replicasets +func filterPodsMatchingReplicaSets(replicaSets []*extensions.ReplicaSet, podList *api.PodList) ([]api.Pod, error) { + rsPods := []api.Pod{} + for _, rs := range replicaSets { + matchingFunc, err := rsutil.MatchingPodsFunc(rs) + if err != nil { + return nil, err + } + if matchingFunc == nil { + continue + } + rsPods = append(rsPods, podutil.Filter(podList, matchingFunc)...) + } + return rsPods, nil +} + +// Revision returns the revision number of the input replica set +func Revision(rs *extensions.ReplicaSet) (int64, error) { + v, ok := rs.Annotations[RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} + +func IsRollingUpdate(deployment *extensions.Deployment) bool { + return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType +} + +// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have. +// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. +// 1) The new RS is saturated: newRS's replicas == deployment's replicas +// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas +func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { + switch deployment.Spec.Strategy.Type { + case extensions.RollingUpdateDeploymentStrategyType: + // Check if we can scale up. + maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) + if err != nil { + return 0, err + } + // Find the total number of pods + currentPodCount := GetReplicaCountForReplicaSets(allRSs) + maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) + if currentPodCount >= maxTotalPods { + // Cannot scale up. + return newRS.Spec.Replicas, nil + } + // Scale up. + scaleUpCount := maxTotalPods - currentPodCount + // Do not exceed the number of desired replicas. 
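+		// Worked example (illustrative numbers): with 10 desired replicas, maxSurge
+		// 25% (rounded up to 3) and 11 pods across all RSes, maxTotalPods is 13 and
+		// scaleUpCount starts at 2; the IntMin below then caps it so the new RS never
+		// exceeds deployment.Spec.Replicas.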
+ scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) + return newRS.Spec.Replicas + scaleUpCount, nil + case extensions.RecreateDeploymentStrategyType: + return deployment.Spec.Replicas, nil + default: + return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) + } +} + +// Polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. +// Returns error if polling timesout. +func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { + // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. + return wait.Poll(interval, timeout, func() (bool, error) { + deployment, err := getDeploymentFunc() + if err != nil { + return false, err + } + return deployment.Status.ObservedGeneration >= desiredGeneration, nil + }) +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/diff/diff.go b/vendor/k8s.io/kubernetes/pkg/util/diff/diff.go new file mode 100644 index 00000000..b9771375 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/diff/diff.go @@ -0,0 +1,267 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package diff + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/kubernetes/pkg/util/validation/field" +) + +// StringDiff diffs a and b and returns a human readable diff. +func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +// ObjectDiff writes the two objects out as JSON and prints out the identical part of +// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. +// For debugging tests. +func ObjectDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic(fmt.Sprintf("a: %v", err)) + } + bb, err := json.Marshal(b) + if err != nil { + panic(fmt.Sprintf("b: %v", err)) + } + return StringDiff(string(ab), string(bb)) +} + +// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, +// which shows absolutely everything by recursing into every single pointer +// (go's %#v formatters OTOH stop at a certain point). This is needed when you +// can't figure out why reflect.DeepEqual is returning false and nothing is +// showing you differences. This will. +func ObjectGoPrintDiff(a, b interface{}) string { + s := spew.ConfigState{DisableMethods: true} + return StringDiff( + s.Sprintf("%#v", a), + s.Sprintf("%#v", b), + ) +} + +func ObjectReflectDiff(a, b interface{}) string { + vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) + if vA.Type() != vB.Type() { + return fmt.Sprintf("type A %T and type B %T do not match", a, b) + } + diffs := objectReflectDiff(field.NewPath("object"), vA, vB) + if len(diffs) == 0 { + return "" + } + out := []string{""} + for _, d := range diffs { + out = append(out, + fmt.Sprintf("%s:", d.path), + limit(fmt.Sprintf(" a: %#v", d.a), 80), + limit(fmt.Sprintf(" b: %#v", d.b), 80), + ) + } + return strings.Join(out, "\n") +} + +func limit(s string, max int) string { + if len(s) > max { + return s[:max] + } + return s +} + +func public(s string) bool { + if len(s) == 0 { + return false + } + return s[:1] == strings.ToUpper(s[:1]) +} + +type diff struct { + path *field.Path + a, b interface{} +} + +type orderedDiffs []diff + +func (d orderedDiffs) Len() int { return len(d) } +func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d orderedDiffs) Less(i, j int) bool { + a, b := d[i].path.String(), d[j].path.String() + if a < b { + return true + } + return false +} + +func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { + switch a.Type().Kind() { + case reflect.Struct: + var changes []diff + for i := 0; i < a.Type().NumField(); i++ { + if !public(a.Type().Field(i).Name) { + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { + changes = append(changes, sub...) 
+ } + } + return changes + case reflect.Ptr: + if a.IsNil() || b.IsNil() { + switch { + case a.IsNil() && b.IsNil(): + return nil + case a.IsNil(): + return []diff{{path: path, a: nil, b: b.Interface()}} + default: + return []diff{{path: path, a: a.Interface(), b: nil}} + } + } + return objectReflectDiff(path, a.Elem(), b.Elem()) + case reflect.Chan: + if !reflect.DeepEqual(a.Interface(), b.Interface()) { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + case reflect.Slice: + if reflect.DeepEqual(a, b) { + return nil + } + lA, lB := a.Len(), b.Len() + l := lA + if lB < lA { + l = lB + } + for i := 0; i < l; i++ { + if !reflect.DeepEqual(a.Index(i), b.Index(i)) { + return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + } + } + var diffs []diff + for i := l; i < lA; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) + } + for i := l; i < lB; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) + } + return diffs + case reflect.Map: + if reflect.DeepEqual(a, b) { + return nil + } + aKeys := make(map[interface{}]interface{}) + for _, key := range a.MapKeys() { + aKeys[key.Interface()] = a.MapIndex(key).Interface() + } + var missing []diff + for _, key := range b.MapKeys() { + if _, ok := aKeys[key.Interface()]; ok { + delete(aKeys, key.Interface()) + if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: a.MapIndex(key).Interface(), b: b.MapIndex(key).Interface()}) + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) + } + for key, value := range aKeys { + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) + } + sort.Sort(orderedDiffs(missing)) + return missing + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + if !a.CanInterface() { + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. + DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go b/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go new file mode 100644 index 00000000..94b9f733 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "strings" + + "github.com/golang/glog" + "github.com/spf13/pflag" +) + +// WordSepNormalizeFunc changes all flags that contain "_" separators +func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) + } + return pflag.NormalizedName(name) +} + +// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators +func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + nname := strings.Replace(name, "_", "-", -1) + glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + + return pflag.NormalizedName(nname) + } + return pflag.NormalizedName(name) +} + +// InitFlags normalizes and parses the command line flags +func InitFlags() { + pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + pflag.Parse() +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go b/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go new file mode 100644 index 00000000..a9359695 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "strconv" +) + +// Tristate is a flag compatible with flags and pflags that +// keeps track of whether it had a value supplied or not. 
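+// A Tristate can be registered like any other pflag value, for example (assumed
+// usage; the flag name is hypothetical):
+//
+//	var foo Tristate
+//	pflag.CommandLine.Var(&foo, "enable-foo", "enable the foo feature")
+//	// after parsing, foo.Provided() reports whether the flag was set at all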
+type Tristate int + +const ( + Unset Tristate = iota // 0 + True + False +) + +func (f *Tristate) Default(value bool) { + *f = triFromBool(value) +} + +func (f Tristate) String() string { + b := boolFromTri(f) + return fmt.Sprintf("%t", b) +} + +func (f Tristate) Value() bool { + b := boolFromTri(f) + return b +} + +func (f *Tristate) Set(value string) error { + boolVal, err := strconv.ParseBool(value) + if err != nil { + return err + } + + *f = triFromBool(boolVal) + return nil +} + +func (f Tristate) Provided() bool { + if f != Unset { + return true + } + return false +} + +func (f *Tristate) Type() string { + return "tristate" +} + +func boolFromTri(t Tristate) bool { + if t == True { + return true + } else { + return false + } +} + +func triFromBool(b bool) Tristate { + if b { + return True + } else { + return False + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go new file mode 100644 index 00000000..1898c55c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + "time" + + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/integer" +) + +type backoffEntry struct { + backoff time.Duration + lastUpdate time.Time +} + +type Backoff struct { + sync.Mutex + Clock util.Clock + defaultDuration time.Duration + maxDuration time.Duration + perItemBackoff map[string]*backoffEntry +} + +func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: tc, + defaultDuration: initial, + maxDuration: max, + } +} + +func NewBackOff(initial, max time.Duration) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: util.RealClock{}, + defaultDuration: initial, + maxDuration: max, + } +} + +// Get the current backoff Duration +func (p *Backoff) Get(id string) time.Duration { + p.Lock() + defer p.Unlock() + var delay time.Duration + entry, ok := p.perItemBackoff[id] + if ok { + delay = entry.backoff + } + return delay +} + +// move backoff to the next mark, capping at maxDuration +func (p *Backoff) Next(id string, eventTime time.Time) { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + entry = p.initEntryUnsafe(id) + } else { + delay := entry.backoff * 2 // exponential + entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + } + entry.lastUpdate = p.Clock.Now() +} + +// Reset forces clearing of all backoff data for a given key. 
+func (p *Backoff) Reset(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Returns True if the elapsed time since eventTime is smaller than the current backoff window +func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return p.Clock.Now().Sub(eventTime) < entry.backoff +} + +// Returns True if time since lastupdate is less than the current backoff window. +func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return eventTime.Sub(entry.lastUpdate) < entry.backoff +} + +// Garbage collect records that have aged past maxDuration. Backoff users are expected +// to invoke this periodically. +func (p *Backoff) GC() { + p.Lock() + defer p.Unlock() + now := p.Clock.Now() + for id, entry := range p.perItemBackoff { + if now.Sub(entry.lastUpdate) > p.maxDuration*2 { + // GC when entry has not been updated for 2*maxDuration + delete(p.perItemBackoff, id) + } + } +} + +func (p *Backoff) DeleteEntry(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Take a lock on *Backoff, before calling initEntryUnsafe +func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { + entry := &backoffEntry{backoff: p.defaultDuration} + p.perItemBackoff[id] = entry + return entry +} + +// After 2*maxDuration we restart the backoff factor to the beginning +func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go new file mode 100644 index 00000000..a63817ca --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go @@ -0,0 +1,116 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + + "github.com/juju/ratelimit" +) + +type RateLimiter interface { + // TryAccept returns true if a token is taken immediately. Otherwise, + // it returns false. + TryAccept() bool + // Accept returns once a token becomes available. + Accept() + // Stop stops the rate limiter, subsequent calls to CanAccept will return false + Stop() + // Saturation returns a percentage number which describes how saturated + // this rate limiter is. + // Usually we use token bucket rate limiter. In that case, + // 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use. 
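+	// For example, with a burst (capacity) of 10 and 3 tokens currently
+	// available, Saturation reports 0.7 (illustrative numbers).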
+ Saturation() float64 +} + +type tokenBucketRateLimiter struct { + limiter *ratelimit.Bucket +} + +// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. +// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a +// smoothed qps rate of 'qps'. +// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'. +// The maximum number of tokens in the bucket is capped at 'burst'. +func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { + limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst)) + return &tokenBucketRateLimiter{limiter} +} + +func (t *tokenBucketRateLimiter) TryAccept() bool { + return t.limiter.TakeAvailable(1) == 1 +} + +func (t *tokenBucketRateLimiter) Saturation() float64 { + capacity := t.limiter.Capacity() + avail := t.limiter.Available() + return float64(capacity-avail) / float64(capacity) +} + +// Accept will block until a token becomes available +func (t *tokenBucketRateLimiter) Accept() { + t.limiter.Wait(1) +} + +func (t *tokenBucketRateLimiter) Stop() { +} + +type fakeAlwaysRateLimiter struct{} + +func NewFakeAlwaysRateLimiter() RateLimiter { + return &fakeAlwaysRateLimiter{} +} + +func (t *fakeAlwaysRateLimiter) TryAccept() bool { + return true +} + +func (t *fakeAlwaysRateLimiter) Saturation() float64 { + return 0 +} + +func (t *fakeAlwaysRateLimiter) Stop() {} + +func (t *fakeAlwaysRateLimiter) Accept() {} + +type fakeNeverRateLimiter struct { + wg sync.WaitGroup +} + +func NewFakeNeverRateLimiter() RateLimiter { + wg := sync.WaitGroup{} + wg.Add(1) + return &fakeNeverRateLimiter{ + wg: wg, + } +} + +func (t *fakeNeverRateLimiter) TryAccept() bool { + return false +} + +func (t *fakeNeverRateLimiter) Saturation() float64 { + return 1 +} + +func (t *fakeNeverRateLimiter) Stop() { + t.wg.Done() +} + +func (t *fakeNeverRateLimiter) Accept() { + t.wg.Wait() +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go b/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go new file mode 100644 index 00000000..57171e10 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
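Illustrative only (not part of the vendored files): exercising the token-bucket RateLimiter from throttle.go above. The qps and burst values are arbitrary.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Bursts of up to 10 requests, refilling at roughly 5 tokens per second.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5.0, 10)

	for i := 0; i < 12; i++ {
		if limiter.TryAccept() {
			continue // a token was available immediately
		}
		limiter.Accept() // otherwise block until the bucket refills
	}
	fmt.Printf("saturation: %.2f\n", limiter.Saturation())
}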
+*/ + +package homedir + +import ( + "os" + "runtime" +) + +// HomeDir returns the home directory for the current user +func HomeDir() string { + if runtime.GOOS == "windows" { + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDir := homeDrive + homePath + if _, err := os.Stat(homeDir); err == nil { + return homeDir + } + } + if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { + if _, err := os.Stat(userProfile); err == nil { + return userProfile + } + } + } + return os.Getenv("HOME") +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go b/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go new file mode 100644 index 00000000..c51cd952 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integer + +func IntMax(a, b int) int { + if b > a { + return b + } + return a +} + +func IntMin(a, b int) int { + if b < a { + return b + } + return a +} + +func Int64Max(a, b int64) int64 { + if b > a { + return b + } + return a +} + +func Int64Min(a, b int64) int64 { + if b < a { + return b + } + return a +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go new file mode 100644 index 00000000..6bdf4ac5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package jsonpath is a template engine using jsonpath syntax, +// which can be seen at http://goessner.net/articles/JsonPath/. +// In addition, it has {range} {end} function to iterate list and slice. +package jsonpath diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go new file mode 100644 index 00000000..7a402af4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go @@ -0,0 +1,486 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + + "k8s.io/kubernetes/third_party/golang/template" +) + +type JSONPath struct { + name string + parser *Parser + stack [][]reflect.Value //push and pop values in different scopes + cur []reflect.Value //current scope values + beginRange int + inRange int + endRange int +} + +func New(name string) *JSONPath { + return &JSONPath{ + name: name, + beginRange: 0, + inRange: 0, + endRange: 0, + } +} + +// Parse parse the given template, return error +func (j *JSONPath) Parse(text string) (err error) { + j.parser, err = Parse(j.name, text) + return +} + +// Execute bounds data into template and write the result +func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { + fullResults, err := j.FindResults(data) + if err != nil { + return err + } + for ix := range fullResults { + if err := j.PrintResults(wr, fullResults[ix]); err != nil { + return err + } + } + return nil +} + +func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { + if j.parser == nil { + return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) + } + + j.cur = []reflect.Value{reflect.ValueOf(data)} + nodes := j.parser.Root.Nodes + fullResult := [][]reflect.Value{} + for i := 0; i < len(nodes); i++ { + node := nodes[i] + results, err := j.walk(j.cur, node) + if err != nil { + return nil, err + } + + //encounter an end node, break the current block + if j.endRange > 0 && j.endRange <= j.inRange { + j.endRange -= 1 + break + } + //encounter a range node, start a range loop + if j.beginRange > 0 { + j.beginRange -= 1 + j.inRange += 1 + for k, value := range results { + j.parser.Root.Nodes = nodes[i+1:] + if k == len(results)-1 { + j.inRange -= 1 + } + nextResults, err := j.FindResults(value.Interface()) + if err != nil { + return nil, err + } + fullResult = append(fullResult, nextResults...) 
+ } + break + } + fullResult = append(fullResult, results) + } + return fullResult, nil +} + +// PrintResults write the results into writer +func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { + for i, r := range results { + text, err := j.evalToText(r) + if err != nil { + return err + } + if i != len(results)-1 { + text = append(text, ' ') + } + if _, err = wr.Write(text); err != nil { + return err + } + } + return nil +} + +// walk visits tree rooted at the given node in DFS order +func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { + switch node := node.(type) { + case *ListNode: + return j.evalList(value, node) + case *TextNode: + return []reflect.Value{reflect.ValueOf(node.Text)}, nil + case *FieldNode: + return j.evalField(value, node) + case *ArrayNode: + return j.evalArray(value, node) + case *FilterNode: + return j.evalFilter(value, node) + case *IntNode: + return j.evalInt(value, node) + case *FloatNode: + return j.evalFloat(value, node) + case *WildcardNode: + return j.evalWildcard(value, node) + case *RecursiveNode: + return j.evalRecursive(value, node) + case *UnionNode: + return j.evalUnion(value, node) + case *IdentifierNode: + return j.evalIdentifier(value, node) + default: + return value, fmt.Errorf("unexpected Node %v", node) + } +} + +// evalInt evaluates IntNode +func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalFloat evaluates FloatNode +func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalList evaluates ListNode +func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { + var err error + curValue := value + for _, node := range node.Nodes { + curValue, err = j.walk(curValue, node) + if err != nil { + return curValue, err + } + } + return curValue, nil +} + +// evalIdentifier evaluates IdentifierNode +func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { + results := []reflect.Value{} + switch node.Name { + case "range": + j.stack = append(j.stack, j.cur) + j.beginRange += 1 + results = input + case "end": + if j.endRange < j.inRange { //inside a loop, break the current block + j.endRange += 1 + break + } + // the loop is about to end, pop value and continue the following execution + if len(j.stack) > 0 { + j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] + } else { + return results, fmt.Errorf("not in range, nothing to end") + } + default: + return input, fmt.Errorf("unrecognized identifier %v", node.Name) + } + return results, nil +} + +// evalArray evaluates ArrayNode +func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + + value, isNil := template.Indirect(value) + if isNil { + continue + } + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice", value.Type()) + } + params := node.Params + if !params[0].Known { + params[0].Value = 0 + } + if params[0].Value < 0 { + params[0].Value += value.Len() + } + if !params[1].Known { + params[1].Value = 
value.Len() + } + + if params[1].Value < 0 { + params[1].Value += value.Len() + } + + sliceLength := value.Len() + if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. + if params[0].Value >= sliceLength { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) + } + if params[1].Value > sliceLength { + return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) + } + } + + if !params[2].Known { + value = value.Slice(params[0].Value, params[1].Value) + } else { + value = value.Slice3(params[0].Value, params[1].Value, params[2].Value) + } + for i := 0; i < value.Len(); i++ { + result = append(result, value.Index(i)) + } + } + return result, nil +} + +// evalUnion evaluates UnionNode +func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, listNode := range node.Nodes { + temp, err := j.evalList(input, listNode) + if err != nil { + return input, err + } + result = append(result, temp...) + } + return result, nil +} + +func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { + t := value.Type() + var inlineValue *reflect.Value + for ix := 0; ix < t.NumField(); ix++ { + f := t.Field(ix) + jsonTag := f.Tag.Get("json") + parts := strings.Split(jsonTag, ",") + if len(parts) == 0 { + continue + } + if parts[0] == node.Value { + return value.Field(ix), nil + } + if len(parts[0]) == 0 { + val := value.Field(ix) + inlineValue = &val + } + } + if inlineValue != nil { + if inlineValue.Kind() == reflect.Struct { + // handle 'inline' + match, err := j.findFieldInValue(inlineValue, node) + if err != nil { + return reflect.Value{}, err + } + if match.IsValid() { + return match, nil + } + } + } + return value.FieldByName(node.Value), nil +} + +// evalField evaluates filed of struct or key of map. 
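Illustrative only (not part of the vendored file): the simplest use of the JSONPath type above. The template string and data literal are invented for the example.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/pkg/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "nginx"},
	}

	jp := jsonpath.New("demo")
	if err := jp.Parse("{.metadata.name}"); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := jp.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // prints: nginx
}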
+func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { + results := []reflect.Value{} + // If there's no input, there's no output + if len(input) == 0 { + return results, nil + } + for _, value := range input { + var result reflect.Value + value, isNil := template.Indirect(value) + if isNil { + continue + } + + if value.Kind() == reflect.Struct { + var err error + if result, err = j.findFieldInValue(&value, node); err != nil { + return nil, err + } + } else if value.Kind() == reflect.Map { + mapKeyType := value.Type().Key() + nodeValue := reflect.ValueOf(node.Value) + // node value type must be convertible to map key type + if !nodeValue.Type().ConvertibleTo(mapKeyType) { + return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) + } + result = value.MapIndex(nodeValue.Convert(mapKeyType)) + } + if result.IsValid() { + results = append(results, result) + } + } + if len(results) == 0 { + return results, fmt.Errorf("%s is not found", node.Value) + } + return results, nil +} + +// evalWildcard extract all contents of the given value +func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalRecursive visit the given value recursively and push all of them to result +func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + results := []reflect.Value{} + value, isNil := template.Indirect(value) + if isNil { + continue + } + + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + if len(results) != 0 { + result = append(result, value) + output, err := j.evalRecursive(results, node) + if err != nil { + return result, err + } + result = append(result, output...) 
+ } + } + return result, nil +} + +// evalFilter filter array according to FilterNode +func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + value, _ = template.Indirect(value) + + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) + } + for i := 0; i < value.Len(); i++ { + temp := []reflect.Value{value.Index(i)} + lefts, err := j.evalList(temp, node.Left) + + //case exists + if node.Operator == "exists" { + if len(lefts) > 0 { + results = append(results, value.Index(i)) + } + continue + } + + if err != nil { + return input, err + } + + var left, right interface{} + if len(lefts) != 1 { + return input, fmt.Errorf("can only compare one element at a time") + } + left = lefts[0].Interface() + + rights, err := j.evalList(temp, node.Right) + if err != nil { + return input, err + } + if len(rights) != 1 { + return input, fmt.Errorf("can only compare one element at a time") + } + right = rights[0].Interface() + + pass := false + switch node.Operator { + case "<": + pass, err = template.Less(left, right) + case ">": + pass, err = template.Greater(left, right) + case "==": + pass, err = template.Equal(left, right) + case "!=": + pass, err = template.NotEqual(left, right) + case "<=": + pass, err = template.LessEqual(left, right) + case ">=": + pass, err = template.GreaterEqual(left, right) + default: + return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) + } + if err != nil { + return results, err + } + if pass { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalToText translates reflect value to corresponding text +func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { + iface, ok := template.PrintableValue(v) + if !ok { + return nil, fmt.Errorf("can't print type %s", v.Type()) + } + var buffer bytes.Buffer + fmt.Fprint(&buffer, iface) + return buffer.Bytes(), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go new file mode 100644 index 00000000..ddf015c0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go @@ -0,0 +1,239 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import "fmt" + +// NodeType identifies the type of a parse tree node. 
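Illustrative only (not part of the vendored file): the {range}/{end} iteration mentioned in doc.go, applied to an inline list; data and template are invented.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/pkg/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "nginx"},
			map[string]interface{}{"name": "redis"},
		},
	}

	jp := jsonpath.New("list")
	if err := jp.Parse("{range .items[*]}{.name} {end}"); err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := jp.Execute(&buf, data); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // roughly: nginx redis
}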
+type NodeType int + +// Type returns itself and provides an easy default implementation +func (t NodeType) Type() NodeType { + return t +} + +func (t NodeType) String() string { + return NodeTypeName[t] +} + +const ( + NodeText NodeType = iota + NodeArray + NodeList + NodeField + NodeIdentifier + NodeFilter + NodeInt + NodeFloat + NodeWildcard + NodeRecursive + NodeUnion +) + +var NodeTypeName = map[NodeType]string{ + NodeText: "NodeText", + NodeArray: "NodeArray", + NodeList: "NodeList", + NodeField: "NodeField", + NodeIdentifier: "NodeIdentifier", + NodeFilter: "NodeFilter", + NodeInt: "NodeInt", + NodeFloat: "NodeFloat", + NodeWildcard: "NodeWildcard", + NodeRecursive: "NodeRecursive", + NodeUnion: "NodeUnion", +} + +type Node interface { + Type() NodeType + String() string +} + +// ListNode holds a sequence of nodes. +type ListNode struct { + NodeType + Nodes []Node // The element nodes in lexical order. +} + +func newList() *ListNode { + return &ListNode{NodeType: NodeList} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) String() string { + return fmt.Sprintf("%s", l.Type()) +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Text string // The text; may span newlines. +} + +func newText(text string) *TextNode { + return &TextNode{NodeType: NodeText, Text: text} +} + +func (t *TextNode) String() string { + return fmt.Sprintf("%s: %s", t.Type(), t.Text) +} + +// FieldNode holds field of struct +type FieldNode struct { + NodeType + Value string +} + +func newField(value string) *FieldNode { + return &FieldNode{NodeType: NodeField, Value: value} +} + +func (f *FieldNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Value) +} + +// IdentifierNode holds an identifier +type IdentifierNode struct { + NodeType + Name string +} + +func newIdentifier(value string) *IdentifierNode { + return &IdentifierNode{ + NodeType: NodeIdentifier, + Name: value, + } +} + +func (f *IdentifierNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Name) +} + +// ParamsEntry holds param information for ArrayNode +type ParamsEntry struct { + Value int + Known bool //whether the value is known when parse it +} + +// ArrayNode holds start, end, step information for array index selection +type ArrayNode struct { + NodeType + Params [3]ParamsEntry //start, end, step +} + +func newArray(params [3]ParamsEntry) *ArrayNode { + return &ArrayNode{ + NodeType: NodeArray, + Params: params, + } +} + +func (a *ArrayNode) String() string { + return fmt.Sprintf("%s: %v", a.Type(), a.Params) +} + +// FilterNode holds operand and operator information for filter +type FilterNode struct { + NodeType + Left *ListNode + Right *ListNode + Operator string +} + +func newFilter(left, right *ListNode, operator string) *FilterNode { + return &FilterNode{ + NodeType: NodeFilter, + Left: left, + Right: right, + Operator: operator, + } +} + +func (f *FilterNode) String() string { + return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) +} + +// IntNode holds integer value +type IntNode struct { + NodeType + Value int +} + +func newInt(num int) *IntNode { + return &IntNode{NodeType: NodeInt, Value: num} +} + +func (i *IntNode) String() string { + return fmt.Sprintf("%s: %d", i.Type(), i.Value) +} + +// FloatNode holds float value +type FloatNode struct { + NodeType + Value float64 +} + +func newFloat(num float64) *FloatNode { + return &FloatNode{NodeType: NodeFloat, Value: num} +} + +func (i *FloatNode) String() string { + 
return fmt.Sprintf("%s: %f", i.Type(), i.Value) +} + +// WildcardNode means a wildcard +type WildcardNode struct { + NodeType +} + +func newWildcard() *WildcardNode { + return &WildcardNode{NodeType: NodeWildcard} +} + +func (i *WildcardNode) String() string { + return fmt.Sprintf("%s", i.Type()) +} + +// RecursiveNode means a recursive descent operator +type RecursiveNode struct { + NodeType +} + +func newRecursive() *RecursiveNode { + return &RecursiveNode{NodeType: NodeRecursive} +} + +func (r *RecursiveNode) String() string { + return fmt.Sprintf("%s", r.Type()) +} + +// UnionNode is union of ListNode +type UnionNode struct { + NodeType + Nodes []*ListNode +} + +func newUnion(nodes []*ListNode) *UnionNode { + return &UnionNode{NodeType: NodeUnion, Nodes: nodes} +} + +func (u *UnionNode) String() string { + return fmt.Sprintf("%s", u.Type()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go new file mode 100644 index 00000000..bd1f5ecd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go @@ -0,0 +1,427 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +const eof = -1 + +const ( + leftDelim = "{" + rightDelim = "}" +) + +type Parser struct { + Name string + Root *ListNode + input string + cur *ListNode + pos int + start int + width int +} + +// Parse parsed the given text and return a node Parser. +// If an error is encountered, parsing stops and an empty +// Parser is returned with the error +func Parse(name, text string) (*Parser, error) { + p := NewParser(name) + err := p.Parse(text) + if err != nil { + p = nil + } + return p, err +} + +func NewParser(name string) *Parser { + return &Parser{ + Name: name, + } +} + +// parseAction parsed the expression inside delimiter +func parseAction(name, text string) (*Parser, error) { + p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) + // when error happens, p will be nil, so we need to return here + if err != nil { + return p, err + } + p.Root = p.Root.Nodes[0].(*ListNode) + return p, nil +} + +func (p *Parser) Parse(text string) error { + p.input = text + p.Root = newList() + p.pos = 0 + return p.parseText(p.Root) +} + +// consumeText return the parsed text since last cosumeText +func (p *Parser) consumeText() string { + value := p.input[p.start:p.pos] + p.start = p.pos + return value +} + +// next returns the next rune in the input. +func (p *Parser) next() rune { + if int(p.pos) >= len(p.input) { + p.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(p.input[p.pos:]) + p.width = w + p.pos += p.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (p *Parser) peek() rune { + r := p.next() + p.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. 
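Illustrative only (not part of the vendored file): parsing a template with the exported Parse above and dumping the node tree defined in node.go. The template string is arbitrary.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/jsonpath"
)

func main() {
	parser, err := jsonpath.Parse("demo", "{.spec.containers[0].image}")
	if err != nil {
		panic(err)
	}
	// Root holds one ListNode per {...} action in the template.
	action := parser.Root.Nodes[0].(*jsonpath.ListNode)
	for _, node := range action.Nodes {
		// e.g. "NodeField: spec", "NodeField: containers", "NodeArray: ...", "NodeField: image"
		fmt.Println(node.String())
	}
}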
+func (p *Parser) backup() { + p.pos -= p.width +} + +func (p *Parser) parseText(cur *ListNode) error { + for { + if strings.HasPrefix(p.input[p.pos:], leftDelim) { + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return p.parseLeftDelim(cur) + } + if p.next() == eof { + break + } + } + // Correctly reached EOF. + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return nil +} + +// parseLeftDelim scans the left delimiter, which is known to be present. +func (p *Parser) parseLeftDelim(cur *ListNode) error { + p.pos += len(leftDelim) + p.consumeText() + newNode := newList() + cur.append(newNode) + cur = newNode + return p.parseInsideAction(cur) +} + +func (p *Parser) parseInsideAction(cur *ListNode) error { + prefixMap := map[string]func(*ListNode) error{ + rightDelim: p.parseRightDelim, + "[?(": p.parseFilter, + "..": p.parseRecursive, + } + for prefix, parseFunc := range prefixMap { + if strings.HasPrefix(p.input[p.pos:], prefix) { + return parseFunc(cur) + } + } + + switch r := p.next(); { + case r == eof || isEndOfLine(r): + return fmt.Errorf("unclosed action") + case r == ' ': + p.consumeText() + case r == '@' || r == '$': //the current object, just pass it + p.consumeText() + case r == '[': + return p.parseArray(cur) + case r == '"': + return p.parseQuote(cur) + case r == '.': + return p.parseField(cur) + case r == '+' || r == '-' || unicode.IsDigit(r): + p.backup() + return p.parseNumber(cur) + case isAlphaNumeric(r): + p.backup() + return p.parseIdentifier(cur) + default: + return fmt.Errorf("unrecognized character in action: %#U", r) + } + return p.parseInsideAction(cur) +} + +// parseRightDelim scans the right delimiter, which is known to be present. +func (p *Parser) parseRightDelim(cur *ListNode) error { + p.pos += len(rightDelim) + p.consumeText() + cur = p.Root + return p.parseText(cur) +} + +// parseIdentifier scans build-in keywords, like "range" "end" +func (p *Parser) parseIdentifier(cur *ListNode) error { + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + cur.append(newIdentifier(value)) + return p.parseInsideAction(cur) +} + +// parseRecursive scans the recursive desent operator .. +func (p *Parser) parseRecursive(cur *ListNode) error { + p.pos += len("..") + p.consumeText() + cur.append(newRecursive()) + if r := p.peek(); isAlphaNumeric(r) { + return p.parseField(cur) + } + return p.parseInsideAction(cur) +} + +// parseNumber scans number +func (p *Parser) parseNumber(cur *ListNode) error { + r := p.peek() + if r == '+' || r == '-' { + r = p.next() + } + for { + r = p.next() + if r != '.' 
&& !unicode.IsDigit(r) { + p.backup() + break + } + } + value := p.consumeText() + i, err := strconv.Atoi(value) + if err == nil { + cur.append(newInt(i)) + return p.parseInsideAction(cur) + } + d, err := strconv.ParseFloat(value, 64) + if err == nil { + cur.append(newFloat(d)) + return p.parseInsideAction(cur) + } + return fmt.Errorf("cannot parse number %s", value) +} + +// parseArray scans array index selection +func (p *Parser) parseArray(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated array") + case ']': + break Loop + } + } + text := p.consumeText() + text = string(text[1 : len(text)-1]) + if text == "*" { + text = ":" + } + + //union operator + strs := strings.Split(text, ",") + if len(strs) > 1 { + union := []*ListNode{} + for _, str := range strs { + parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) + if err != nil { + return err + } + union = append(union, parser.Root) + } + cur.append(newUnion(union)) + return p.parseInsideAction(cur) + } + + // dict key + reg := regexp.MustCompile(`^'([^']*)'$`) + value := reg.FindStringSubmatch(text) + if value != nil { + parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) + if err != nil { + return err + } + for _, node := range parser.Root.Nodes { + cur.append(node) + } + return p.parseInsideAction(cur) + } + + //slice operator + reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) + value = reg.FindStringSubmatch(text) + if value == nil { + return fmt.Errorf("invalid array index %s", text) + } + value = value[1:] + params := [3]ParamsEntry{} + for i := 0; i < 3; i++ { + if value[i] != "" { + if i > 0 { + value[i] = value[i][1:] + } + if i > 0 && value[i] == "" { + params[i].Known = false + } else { + var err error + params[i].Known = true + params[i].Value, err = strconv.Atoi(value[i]) + if err != nil { + return fmt.Errorf("array index %s is not a number", value[i]) + } + } + } else { + if i == 1 { + params[i].Known = true + params[i].Value = params[0].Value + 1 + } else { + params[i].Known = false + params[i].Value = 0 + } + } + } + cur.append(newArray(params)) + return p.parseInsideAction(cur) +} + +// parseFilter scans filter inside array selection +func (p *Parser) parseFilter(cur *ListNode) error { + p.pos += len("[?(") + p.consumeText() +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated filter") + case ')': + break Loop + } + } + if p.next() != ']' { + return fmt.Errorf("unclosed array expect ]") + } + reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) + text := p.consumeText() + text = string(text[:len(text)-2]) + value := reg.FindStringSubmatch(text) + if value == nil { + parser, err := parseAction("text", text) + if err != nil { + return err + } + cur.append(newFilter(parser.Root, newList(), "exists")) + } else { + leftParser, err := parseAction("left", value[1]) + if err != nil { + return err + } + rightParser, err := parseAction("right", value[3]) + if err != nil { + return err + } + cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) + } + return p.parseInsideAction(cur) +} + +// parseQuote unquotes string inside double quote +func (p *Parser) parseQuote(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated quoted string") + case '"': + break Loop + } + } + value := p.consumeText() + s, err := strconv.Unquote(value) + if err != nil { + return fmt.Errorf("unquote string %s error %v", value, err) + } + 
cur.append(newText(s)) + return p.parseInsideAction(cur) +} + +// parseField scans a field until a terminator +func (p *Parser) parseField(cur *ListNode) error { + p.consumeText() + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + if value == "*" { + cur.append(newWildcard()) + } else { + cur.append(newField(value)) + } + return p.parseInsideAction(cur) +} + +// isTerminator reports whether the input is at valid termination character to appear after an identifier. +func isTerminator(r rune) bool { + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '[', ']', '$', '@', '{', '}': + return true + } + return false +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go new file mode 100644 index 00000000..0746d878 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package labels provides utilities to work with Kubernetes labels. +package labels diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go b/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go new file mode 100644 index 00000000..624d5ad6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go @@ -0,0 +1,126 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/api/unversioned" +) + +// Clones the given map and returns a new map with the given key and value added. +// Returns the given map, if labelKey is empty. +func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint32) map[string]string { + if labelKey == "" { + // Don't need to add a label. + return labels + } + // Clone. 
+ newLabels := map[string]string{} + for key, value := range labels { + newLabels[key] = value + } + newLabels[labelKey] = fmt.Sprintf("%d", labelValue) + return newLabels +} + +// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed. +// Returns the given map, if labelKey is empty. +func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string { + if labelKey == "" { + // Don't need to add a label. + return labels + } + // Clone. + newLabels := map[string]string{} + for key, value := range labels { + newLabels[key] = value + } + delete(newLabels, labelKey) + return newLabels +} + +// AddLabel returns a map with the given key and value added to the given map. +func AddLabel(labels map[string]string, labelKey string, labelValue string) map[string]string { + if labelKey == "" { + // Don't need to add a label. + return labels + } + if labels == nil { + labels = make(map[string]string) + } + labels[labelKey] = labelValue + return labels +} + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := new(unversioned.LabelSelector) + + // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. + newSelector.MatchLabels = make(map[string]string) + if selector.MatchLabels != nil { + for key, val := range selector.MatchLabels { + newSelector.MatchLabels[key] = val + } + } + newSelector.MatchLabels[labelKey] = fmt.Sprintf("%d", labelValue) + + if selector.MatchExpressions != nil { + newMExps := make([]unversioned.LabelSelectorRequirement, len(selector.MatchExpressions)) + for i, me := range selector.MatchExpressions { + newMExps[i].Key = me.Key + newMExps[i].Operator = me.Operator + if me.Values != nil { + newMExps[i].Values = make([]string, len(me.Values)) + copy(newMExps[i].Values, me.Values) + } else { + newMExps[i].Values = nil + } + } + newSelector.MatchExpressions = newMExps + } else { + newSelector.MatchExpressions = nil + } + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *unversioned.LabelSelector, labelKey string, labelValue string) *unversioned.LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *unversioned.LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go new file mode 100644 index 00000000..3bad7d0b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
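Illustrative only (not part of the vendored files): the label helpers above, with invented keys and values; printed map ordering may vary.

package main

import (
	"fmt"

	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

func main() {
	base := map[string]string{"app": "web"}

	// CloneAndAddLabel leaves the input untouched and formats the uint32 value.
	withHash := labelsutil.CloneAndAddLabel(base, "pod-template-hash", 12345)
	fmt.Println(withHash) // map[app:web pod-template-hash:12345]

	// AddLabel mutates (or allocates) the map it is given.
	base = labelsutil.AddLabel(base, "tier", "frontend")
	fmt.Println(base) // map[app:web tier:frontend]

	fmt.Println(labelsutil.CloneAndRemoveLabel(withHash, "pod-template-hash")) // map[app:web]
}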
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pod provides utilities to work with Kubernetes pod and pod templates. +package pod diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go new file mode 100644 index 00000000..9c57aaeb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go @@ -0,0 +1,100 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "hash/adler32" + "time" + + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" + errorsutil "k8s.io/kubernetes/pkg/util/errors" + hashutil "k8s.io/kubernetes/pkg/util/hash" + "k8s.io/kubernetes/pkg/util/wait" +) + +func GetPodTemplateSpecHash(template api.PodTemplateSpec) uint32 { + podTemplateSpecHasher := adler32.New() + hashutil.DeepHashObject(podTemplateSpecHasher, template) + return podTemplateSpecHasher.Sum32() +} + +// TODO: use client library instead when it starts to support update retries +// see https://github.com/kubernetes/kubernetes/issues/21479 +type updatePodFunc func(pod *api.Pod) error + +// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored. +// The returned bool value can be used to tell if the pod is actually updated. +func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, bool, error) { + var err error + var podUpdated bool + oldPod := pod + if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { + pod, err = podClient.Get(oldPod.Name) + if err != nil { + return false, err + } + // Apply the update, then attempt to push it to the apiserver. + if err = applyUpdate(pod); err != nil { + return false, err + } + if pod, err = podClient.Update(pod); err == nil { + // Update successful. + return true, nil + } + // TODO: don't retry on perm-failed errors and handle them gracefully + // Update could have failed due to conflict error. Try again. + return false, nil + }); err == nil { + // When there's no error, we've updated this pod. + podUpdated = true + } + + // Handle returned error from wait poll + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("timed out trying to update pod: %+v", oldPod) + } + // Ignore the pod not found error, but the pod isn't updated. 
+ if errors.IsNotFound(err) { + glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) + err = nil + } + // Ignore the precondition violated error, but the pod isn't updated. + if err == errorsutil.ErrPreconditionViolated { + glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) + err = nil + } + + // If the error is non-nil the returned pod cannot be trusted; if podUpdated is false, the pod isn't updated; + // if the error is nil and podUpdated is true, the returned pod contains the applied update. + return pod, podUpdated, err +} + +// Filter uses the input function f to filter the given pod list, and return the filtered pods +func Filter(podList *api.PodList, f func(api.Pod) bool) []api.Pod { + pods := make([]api.Pod, 0) + for _, p := range podList.Items { + if f(p) { + pods = append(pods, p) + } + } + return pods +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go b/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go new file mode 100644 index 00000000..e5dd2651 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go @@ -0,0 +1,110 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicaset + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apis/extensions" + unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" + "k8s.io/kubernetes/pkg/labels" + errorsutil "k8s.io/kubernetes/pkg/util/errors" + labelsutil "k8s.io/kubernetes/pkg/util/labels" + podutil "k8s.io/kubernetes/pkg/util/pod" + "k8s.io/kubernetes/pkg/util/wait" +) + +// TODO: use client library instead when it starts to support update retries +// see https://github.com/kubernetes/kubernetes/issues/21479 +type updateRSFunc func(rs *extensions.ReplicaSet) error + +// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored. +// The returned bool value can be used to tell if the RS is actually updated. +func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, applyUpdate updateRSFunc) (*extensions.ReplicaSet, bool, error) { + var err error + var rsUpdated bool + oldRs := rs + if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { + rs, err = rsClient.Get(oldRs.Name) + if err != nil { + return false, err + } + // Apply the update, then attempt to push it to the apiserver. + if err = applyUpdate(rs); err != nil { + return false, err + } + if rs, err = rsClient.Update(rs); err == nil { + // Update successful. + return true, nil + } + // TODO: don't retry on perm-failed errors and handle them gracefully + // Update could have failed due to conflict error. Try again. 
+ return false, nil + }); err == nil { + // When there's no error, we've updated this RS. + rsUpdated = true + } + + // Handle returned error from wait poll + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("timed out trying to update RS: %+v", oldRs) + } + // Ignore the RS not found error, but the RS isn't updated. + if errors.IsNotFound(err) { + glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldRs.Kind, oldRs.Namespace, oldRs.Name) + err = nil + } + // Ignore the precondition violated error, but the RS isn't updated. + if err == errorsutil.ErrPreconditionViolated { + glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldRs.Kind, oldRs.Namespace, oldRs.Name) + err = nil + } + + // If the error is non-nil the returned RS cannot be trusted; if rsUpdated is false, the contoller isn't updated; + // if the error is nil and rsUpdated is true, the returned RS contains the applied update. + return rs, rsUpdated, err +} + +// GetPodTemplateSpecHash returns the pod template hash of a ReplicaSet's pod template space +func GetPodTemplateSpecHash(rs extensions.ReplicaSet) string { + meta := rs.Spec.Template.ObjectMeta + meta.Labels = labelsutil.CloneAndRemoveLabel(meta.Labels, extensions.DefaultDeploymentUniqueLabelKey) + return fmt.Sprintf("%d", podutil.GetPodTemplateSpecHash(api.PodTemplateSpec{ + ObjectMeta: meta, + Spec: rs.Spec.Template.Spec, + })) +} + +// MatchingPodsFunc returns a filter function for pods with matching labels +func MatchingPodsFunc(rs *extensions.ReplicaSet) (func(api.Pod) bool, error) { + if rs == nil { + return nil, nil + } + selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + return func(pod api.Pod) bool { + podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) + return selector.Matches(podLabelsSelector) + }, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go new file mode 100644 index 00000000..f32dbabc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slice provides utility methods for common operations on slices. +package slice + +import ( + "sort" + + utilrand "k8s.io/kubernetes/pkg/util/rand" +) + +// CopyStrings copies the contents of the specified string slice +// into a new slice. +func CopyStrings(s []string) []string { + c := make([]string, len(s)) + copy(c, s) + return c +} + +// SortStrings sorts the specified string slice in place. It returns the same +// slice that was provided in order to facilitate method chaining. +func SortStrings(s []string) []string { + sort.Strings(s) + return s +} + +// ShuffleStrings copies strings from the specified slice into a copy in random +// order. It returns a new slice. 
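Illustrative only (not part of the vendored files): the Filter helper from pkg/util/pod above, selecting running pods from a hand-built PodList; names and phases are invented.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	podutil "k8s.io/kubernetes/pkg/util/pod"
)

func main() {
	list := &api.PodList{Items: []api.Pod{
		{ObjectMeta: api.ObjectMeta{Name: "web-1"}, Status: api.PodStatus{Phase: api.PodRunning}},
		{ObjectMeta: api.ObjectMeta{Name: "web-2"}, Status: api.PodStatus{Phase: api.PodPending}},
	}}

	running := podutil.Filter(list, func(p api.Pod) bool {
		return p.Status.Phase == api.PodRunning
	})
	for _, p := range running {
		fmt.Println(p.Name) // web-1
	}
}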
+func ShuffleStrings(s []string) []string { + shuffled := make([]string, len(s)) + perm := utilrand.Perm(len(s)) + for i, j := range perm { + shuffled[j] = s[i] + } + return shuffled +} + +// Int64Slice attaches the methods of Interface to []int64, +// sorting in increasing order. +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Sorts []int64 in increasing order +func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go new file mode 100644 index 00000000..676713bc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go @@ -0,0 +1,1243 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + + "k8s.io/kubernetes/pkg/util/json" + forkedjson "k8s.io/kubernetes/third_party/forked/json" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. +// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" +) + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(errPreconditionFailed) + return ok +} + +type errPreconditionFailed struct { + message string +} + +func newErrPreconditionFailed(target map[string]interface{}) errPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return errPreconditionFailed{s} +} + +func (err errPreconditionFailed) Error() string { + return err.message +} + +type errConflict struct { + message string +} + +func newErrConflict(patch, current string) errConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return errConflict{s} +} + +func (err errConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(errConflict) + return ok +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON document") +var errNoListOfLists = fmt.Errorf("Lists of lists are not supported") + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. 
+// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// Deprecated: Use the synonym CreateTwoWayMergePatch, instead. +func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) { + return CreateTwoWayMergePatch(original, modified, dataStruct) +} + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, errBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, errBadJSONDoc + } + } + + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + + patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, newErrPreconditionFailed(patchMap) + } + } + + return json.Marshal(patchMap) +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. 
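Illustrative only (not part of the vendored file): creating a two-way strategic merge patch for a removed label. It assumes the versioned Pod type from k8s.io/kubernetes/pkg/api/v1 is available to supply the JSON/patch tags; the JSON fragments are hand-written.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"metadata":{"labels":{"app":"web","tier":"frontend"}}}`)
	modified := []byte(`{"metadata":{"labels":{"app":"web"}}}`)

	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, v1.Pod{})
	if err != nil {
		panic(err)
	}
	// Expected roughly: {"metadata":{"labels":{"tier":null}}}
	fmt.Println(string(patch))
}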
+func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) { + patch := map[string]interface{}{} + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + for key, modifiedValue := range modified { + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !ignoreChangesAndAdditions { + patch[key] = modifiedValue + } + + continue + } + + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + + modifiedString, ok := modifiedValue.(string) + if !ok { + return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !ignoreChangesAndAdditions { + patch[key] = modifiedValue + } + + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + fieldType, _, _, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + return nil, err + } + + patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + if len(patchValue) > 0 { + patch[key] = patchValue + } + + continue + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + if len(patchValue) > 0 { + patch[key] = patchValue + } + + continue + } + } + + if !ignoreChangesAndAdditions { + if !reflect.DeepEqual(originalValue, modifiedValue) { + // Values are different, so add to patch + patch[key] = modifiedValue + } + } + } + + if !ignoreDeletions { + // Add nils for deleted values + for key := range original { + _, found := modified[key] + if !found { + patch[key] = nil + } + } + } + + return patch, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists with merge semantics. +func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { + if len(original) == 0 { + if len(modified) == 0 || ignoreChangesAndAdditions { + return nil, nil + } + + return modified, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, err + } + + var patch []interface{} + + if elementType.Kind() == reflect.Map { + patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions) + } else if !ignoreChangesAndAdditions { + patch, err = diffListsOfScalars(original, modified) + } + + if err != nil { + return nil, err + } + + return patch, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists of scalars with merge semantics. 
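Illustrative only (not part of the vendored file): the same helper over a merge-keyed list (containers carry patchMergeKey "name" on the versioned types), showing the $patch: delete directive produced for a removed element. The JSON fragments and the v1.Pod assumption are as in the previous sketch.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"spec":{"containers":[{"name":"web","image":"nginx"},{"name":"cache","image":"redis"}]}}`)
	modified := []byte(`{"spec":{"containers":[{"name":"web","image":"nginx"}]}}`)

	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, v1.Pod{})
	if err != nil {
		panic(err)
	}
	// Expected to contain a delete directive for the removed container, roughly:
	// {"spec":{"containers":[{"$patch":"delete","name":"cache"}]}}
	fmt.Println(string(patch))
}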
+func diffListsOfScalars(original, modified []interface{}) ([]interface{}, error) { + if len(modified) == 0 { + // There is no need to check the length of original because there is no way to create + // a patch that deletes a scalar from a list of scalars with merge semantics. + return nil, nil + } + + patch := []interface{}{} + + originalScalars := uniqifyAndSortScalars(original) + modifiedScalars := uniqifyAndSortScalars(modified) + originalIndex, modifiedIndex := 0, 0 + +loopB: + for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { + for ; originalIndex < len(originalScalars); originalIndex++ { + originalString := fmt.Sprintf("%v", original[originalIndex]) + modifiedString := fmt.Sprintf("%v", modified[modifiedIndex]) + if originalString >= modifiedString { + if originalString != modifiedString { + patch = append(patch, modified[modifiedIndex]) + } + + continue loopB + } + // There is no else clause because there is no way to create a patch that deletes + // a scalar from a list of scalars with merge semantics. + } + + break + } + + // Add any remaining items found only in modified + for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { + patch = append(patch, modified[modifiedIndex]) + } + + return patch, nil +} + +var errNoMergeKeyFmt = "map: %v does not contain declared merge key: %s" +var errBadArgTypeFmt = "expected a %s, but received a %s" + +// Returns a (recursive) strategic merge patch that yields modified when applied to original, +// for a pair of lists of maps with merge semantics. +func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { + patch := make([]interface{}, 0) + + originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) + if err != nil { + return nil, err + } + + modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) + if err != nil { + return nil, err + } + + originalIndex, modifiedIndex := 0, 0 + +loopB: + for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { + modifiedMap, ok := modifiedSorted[modifiedIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(modifiedSorted[modifiedIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + modifiedValue, ok := modifiedMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, modifiedMap, mergeKey) + } + + for ; originalIndex < len(originalSorted); originalIndex++ { + originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(originalSorted[originalIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + originalValue, ok := originalMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) + } + + // Assume that the merge key values are comparable strings + originalString := fmt.Sprintf("%v", originalValue) + modifiedString := fmt.Sprintf("%v", modifiedValue) + if originalString >= modifiedString { + if originalString == modifiedString { + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions) + if err != nil { + return nil, err + } + + originalIndex++ + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedValue + patch = append(patch, patchValue) + } + } else if !ignoreChangesAndAdditions { + // Item was added, so add to patch + patch = append(patch, 
modifiedMap) + } + + continue loopB + } + + if !ignoreDeletions { + // Item was deleted, so add delete directive + patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) + } + } + + break + } + + if !ignoreDeletions { + // Delete any remaining items found only in original + for ; originalIndex < len(originalSorted); originalIndex++ { + originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) + if !ok { + t := reflect.TypeOf(originalSorted[originalIndex]) + return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) + } + + originalValue, ok := originalMap[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) + } + + patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) + } + } + + if !ignoreChangesAndAdditions { + // Add any remaining items found only in modified + for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { + patch = append(patch, modifiedSorted[modifiedIndex]) + } + } + + return patch, nil +} + +// Deprecated: StrategicMergePatchData is deprecated. Use the synonym StrategicMergePatch, +// instead, which follows the naming convention of evanphx/json-patch. +func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]byte, error) { + return StrategicMergePatch(original, patch, dataStruct) +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. +func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + if original == nil { + original = []byte("{}") + } + + if patch == nil { + patch = []byte("{}") + } + + originalMap := map[string]interface{}{} + err := json.Unmarshal(original, &originalMap) + if err != nil { + return nil, errBadJSONDoc + } + + patchMap := map[string]interface{}{} + err = json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, errBadJSONDoc + } + + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + + result, err := mergeMap(originalMap, patchMap, t) + if err != nil { + return nil, err + } + + return json.Marshal(result) +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, fmt.Errorf(errBadArgTypeFmt, "struct", "nil") + } + + t := reflect.TypeOf(dataStruct) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf(errBadArgTypeFmt, "struct", t.Kind().String()) + } + + return t, nil +} + +var errBadPatchTypeFmt = "unknown patch type: %s in map: %v" + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + if v == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if v == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. 
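+ // For example, a patch of {"$patch": "delete"} empties the map regardless of any
+ // other keys it carries, while {"$patch": "replace", ...} (handled above) swaps the
+ // whole map in as-is.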
+ return map[string]interface{}{}, nil + } + + return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + // Start merging the patch into the original. + for k, patchV := range patch { + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, skip it. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + + continue + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + // If the data type is a pointer, resolve the element. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + // If they're both maps or lists, recurse into the value. + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType == patchType { + // First find the fieldPatchStrategy and fieldPatchMergeKey. + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) + if err != nil { + return nil, err + } + + if originalType.Kind() == reflect.Map && fieldPatchStrategy != replaceDirective { + typedOriginal := original[k].(map[string]interface{}) + typedPatch := patchV.(map[string]interface{}) + var err error + original[k], err = mergeMap(typedOriginal, typedPatch, fieldType) + if err != nil { + return nil, err + } + + continue + } + + if originalType.Kind() == reflect.Slice && fieldPatchStrategy == mergeDirective { + elemType := fieldType.Elem() + typedOriginal := original[k].([]interface{}) + typedPatch := patchV.([]interface{}) + var err error + original[k], err = mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey) + if err != nil { + return nil, err + } + + continue + } + } + + // If originalType and patchType are different OR the types are both + // maps or slices but we're just supposed to replace them, just take + // the value from patch. + original[k] = patchV + } + + return original, nil +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + // If the elements are not maps, merge the slices of scalars. + if t.Kind() != reflect.Map { + // Maybe in the future add a "concat" mode that doesn't + // uniqify. + both := append(original, patch...) + return uniqifyScalars(both), nil + } + + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) + } + + // First look for any special $patch elements. 
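+ // For example, with mergeKey "name", applying the patch
+ //   [{"name": "b", "$patch": "delete"}, {"name": "c"}]
+ // to the original list
+ //   [{"name": "a"}, {"name": "b"}]
+ // first drops the element whose name is "b", then merges/appends the rest,
+ // yielding [{"name": "a"}, {"name": "c"}].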
+ patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if ok { + if patchType == deleteDirective { + mergeValue, ok := typedV[mergeKey] + if ok { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + } else { + return nil, fmt.Errorf("delete patch type with no merge key defined") + } + } else if patchType == replaceDirective { + replace = true + // Continue iterating through the array to prune any other $patch elements. + } else if patchType == mergeDirective { + return nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + } else { + return nil, fmt.Errorf(errBadPatchTypeFmt, patchType, typedV) + } + } else { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } + } + + if replace { + return patchWithoutSpecialElements, nil + } + + patch = patchWithoutSpecialElements + + // Merge patch into original. + for _, v := range patch { + // Because earlier we confirmed that all the elements are maps. + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, fmt.Errorf(errNoMergeKeyFmt, typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, elemType) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + + return original, nil +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, err + } + + newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k != directiveMarker { + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) + if err != nil { + return nil, err + } + + // If v is a map or a merge slice, recurse. 
+ if typedV, ok := v.(map[string]interface{}); ok { + var err error + v, err = sortMergeListsByNameMap(typedV, fieldType) + if err != nil { + return nil, err + } + } else if typedV, ok := v.([]interface{}); ok { + if fieldPatchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... + if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return uniqifyAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, elemType) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. + newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func uniqifyAndSortScalars(s []interface{}) []interface{} { + s = uniqifyScalars(s) + + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func uniqifyScalars(s []interface{}) []interface{} { + // Clever algorithm to uniqify. + length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. 
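+// For example, sliceElementType([]interface{}{1, 2}, []interface{}{3}) yields the int type,
+// mixing ints and strings yields a "list element types are not identical" error, slices whose
+// elements are themselves slices yield errNoListOfLists, and all-empty input yields an error.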
+func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, errNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + return false, nil + } + return HasConflicts(leftValue, rightValue) + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + return HasConflicts(typedLeft[i], typedRight[i]) + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. +func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { + t, err := getTagStructType(dataStruct) + if err != nil { + return true, err + } + + return mergingMapFieldsHaveConflicts(left, right, t, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + fieldType reflect.Type, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + switch rightType := right.(type) { + case map[string]interface{}: + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. 
+ if leftMarker != rightMarker { + return true, nil + } + } + + // Check the individual keys. + return mapsHaveConflicts(leftType, rightType, fieldType) + default: + return true, nil + } + case []interface{}: + switch rightType := right.(type) { + case []interface{}: + return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) + default: + return true, nil + } + case string, float64, bool, int, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker { + if rightValue, ok := typedRight[key]; ok { + fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) + if err != nil { + return true, err + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + fieldType, fieldPatchStrategy, fieldPatchMergeKey); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + fieldType reflect.Type, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + valueType := fieldType.Elem() + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, valueType) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = uniqifyAndSortScalars(typedLeft) + typedRight = uniqifyAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) + } + + result[fmt.Sprintf("%s", mergeValue)] = typedValue + } + + return result, nil +} + +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { + for key, leftValue := range typedLeft { + if rightValue, ok := typedRight[key]; ok { + if 
hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts { + return true, err + } + } + } + + return false, nil +} + +// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, +// while preserving any changes or deletions made to the original configuration in the interim, +// and not overridden by the current configuration. All three documents must be passed to the +// method as json encoded content. It will return a strategic merge patch, or an error if any +// of the documents is invalid, or if there are any preconditions that fail against the modified +// configuration, or, if overwrite is false and there are conflicts between the modified and current +// configurations. Conflicts are defined as keys changed differently from original to modified +// than from original to current. In other words, a conflict occurs if modified changes any key +// in a way that is different from how it is changed in current (e.g., deleting it, changing its +// value). +func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, errBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, errBadJSONDoc + } + } + + currentMap := map[string]interface{}{} + if len(current) > 0 { + if err := json.Unmarshal(current, &currentMap); err != nil { + return nil, errBadJSONDoc + } + } + + t, err := getTagStructType(dataStruct) + if err != nil { + return nil, err + } + + // The patch is the difference from current to modified without deletions, plus deletions + // from original to modified. To find it, we compute deletions, which are the deletions from + // original to modified, and delta, which is the difference from current to modified without + // deletions, and then apply delta to deletions as a patch, which should be strictly additive. + deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true) + if err != nil { + return nil, err + } + + deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false) + if err != nil { + return nil, err + } + + patchMap, err := mergeMap(deletionsMap, deltaMap, t) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, newErrPreconditionFailed(patchMap) + } + } + + // If overwrite is false, and the patch contains any keys that were changed differently, + // then return a conflict error.
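+ // Illustrative three-way example, assuming a struct with matching json-tagged fields:
+ //   original: {"replicas": 1}   modified: {"replicas": 2}   current: {"replicas": 3}
+ // The computed patch is {"replicas": 2}; because "replicas" was also changed from
+ // original to current, the call below reports a conflict unless overwrite is true.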
+ if !overwrite { + changedMap, err := diffMaps(originalMap, currentMap, t, false, false) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, newErrConflict(toYAMLOrError(patchMap), toYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} + +func toYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/vendor/k8s.io/kubernetes/pkg/version/.gitattributes new file mode 100644 index 00000000..7e349eff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go new file mode 100644 index 00000000..95fe9e94 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -0,0 +1,59 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// pkg/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string = "1" // major version, always numeric + gitMinor string = "3" // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons.
For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + gitVersion string = "v1.3.0+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + + buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go new file mode 100644 index 00000000..c0397829 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies version information collected at build time to +// kubernetes components. +package version diff --git a/vendor/k8s.io/kubernetes/pkg/version/semver.go b/vendor/k8s.io/kubernetes/pkg/version/semver.go new file mode 100644 index 00000000..1b5a845a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/semver.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "strings" + "unicode" + + "github.com/blang/semver" + "github.com/golang/glog" +) + +func Parse(gitversion string) (semver.Version, error) { + // optionally trim leading spaces then one v + var seen bool + gitversion = strings.TrimLeftFunc(gitversion, func(ch rune) bool { + if seen { + return false + } + if ch == 'v' { + seen = true + return true + } + return unicode.IsSpace(ch) + }) + + return semver.Make(gitversion) +} + +func MustParse(gitversion string) semver.Version { + v, err := Parse(gitversion) + if err != nil { + glog.Fatalf("failed to parse semver from gitversion %q: %v", gitversion, err) + } + return v +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/version.go b/vendor/k8s.io/kubernetes/pkg/version/version.go new file mode 100644 index 00000000..b8ac0d6c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/version.go @@ -0,0 +1,76 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + "github.com/prometheus/client_golang/prometheus" +) + +// Info contains versioning information. +// TODO: Add []string of api versions supported? It's still unclear +// how we'll want to distribute that information. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} + +// String returns info as a human-friendly version string. +func (info Info) String() string { + return info.GitVersion +} + +func init() { + buildInfo := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "kubernetes_build_info", + Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", + }, + []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}, + ) + info := Get() + buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) + + prometheus.MustRegister(buildInfo) +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go new file mode 100644 index 00000000..1efbb20f --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gcp + +import ( + "net/http" + "time" + + "github.com/golang/glog" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + + "k8s.io/kubernetes/pkg/client/restclient" +) + +func init() { + if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { + glog.Fatalf("Failed to register gcp auth plugin: %v", err) + } +} + +type gcpAuthProvider struct { + tokenSource oauth2.TokenSource + persister restclient.AuthProviderConfigPersister +} + +func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { + ts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister) + if err != nil { + return nil, err + } + return &gcpAuthProvider{ts, persister}, nil +} + +func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { + return &oauth2.Transport{ + Source: g.tokenSource, + Base: rt, + } +} + +func (g *gcpAuthProvider) Login() error { return nil } + +type cachedTokenSource struct { + source oauth2.TokenSource + accessToken string + expiry time.Time + persister restclient.AuthProviderConfigPersister +} + +func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister) (*cachedTokenSource, error) { + var expiryTime time.Time + if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil { + expiryTime = parsedTime + } + ts, err := google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform") + if err != nil { + return nil, err + } + return &cachedTokenSource{ + source: ts, + accessToken: accessToken, + expiry: expiryTime, + persister: persister, + }, nil +} + +func (t *cachedTokenSource) Token() (*oauth2.Token, error) { + tok := &oauth2.Token{ + AccessToken: t.accessToken, + TokenType: "Bearer", + Expiry: t.expiry, + } + if tok.Valid() && !tok.Expiry.IsZero() { + return tok, nil + } + tok, err := t.source.Token() + if err != nil { + return nil, err + } + if t.persister != nil { + cached := map[string]string{ + "access-token": tok.AccessToken, + "expiry": tok.Expiry.Format(time.RFC3339Nano), + } + if err := t.persister.Persist(cached); err != nil { + glog.V(4).Infof("Failed to persist token: %v", err) + } + } + return tok, nil +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS new file mode 100644 index 00000000..ecf33499 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS @@ -0,0 +1,2 @@ +assignees: + - bobbyrullo diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go new file mode 100644 index 00000000..3ad279c1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go @@ -0,0 +1,270 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package oidc + +import ( + "encoding/base64" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/coreos/go-oidc/jose" + "github.com/coreos/go-oidc/oauth2" + "github.com/coreos/go-oidc/oidc" + "github.com/golang/glog" + + "k8s.io/kubernetes/pkg/client/restclient" + "k8s.io/kubernetes/pkg/util/wait" +) + +const ( + cfgIssuerUrl = "idp-issuer-url" + cfgClientID = "client-id" + cfgClientSecret = "client-secret" + cfgCertificateAuthority = "idp-certificate-authority" + cfgCertificateAuthorityData = "idp-certificate-authority-data" + cfgExtraScopes = "extra-scopes" + cfgIDToken = "id-token" + cfgRefreshToken = "refresh-token" +) + +var ( + backoff = wait.Backoff{ + Duration: 1 * time.Second, + Factor: 2, + Jitter: .1, + Steps: 5, + } +) + +func init() { + if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { + glog.Fatalf("Failed to register oidc auth plugin: %v", err) + } +} + +func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { + issuer := cfg[cfgIssuerUrl] + if issuer == "" { + return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl) + } + + clientID := cfg[cfgClientID] + if clientID == "" { + return nil, fmt.Errorf("Must provide %s", cfgClientID) + } + + clientSecret := cfg[cfgClientSecret] + if clientSecret == "" { + return nil, fmt.Errorf("Must provide %s", cfgClientSecret) + } + + var certAuthData []byte + var err error + if cfg[cfgCertificateAuthorityData] != "" { + certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData]) + if err != nil { + return nil, err + } + } + + clientConfig := restclient.Config{ + TLSClientConfig: restclient.TLSClientConfig{ + CAFile: cfg[cfgCertificateAuthority], + CAData: certAuthData, + }, + } + + trans, err := restclient.TransportFor(&clientConfig) + if err != nil { + return nil, err + } + hc := &http.Client{Transport: trans} + + providerCfg, err := oidc.FetchProviderConfig(hc, strings.TrimSuffix(issuer, "/")) + if err != nil { + return nil, fmt.Errorf("error fetching provider config: %v", err) + } + + scopes := strings.Split(cfg[cfgExtraScopes], ",") + oidcCfg := oidc.ClientConfig{ + HTTPClient: hc, + Credentials: oidc.ClientCredentials{ + ID: clientID, + Secret: clientSecret, + }, + ProviderConfig: providerCfg, + Scope: append(scopes, oidc.DefaultScope...), + } + + client, err := oidc.NewClient(oidcCfg) + if err != nil { + return nil, fmt.Errorf("error creating OIDC Client: %v", err) + } + + oClient := &oidcClient{client} + + var initialIDToken jose.JWT + if cfg[cfgIDToken] != "" { + initialIDToken, err = jose.ParseJWT(cfg[cfgIDToken]) + if err != nil { + return nil, err + } + } + + return &oidcAuthProvider{ + initialIDToken: initialIDToken, + refresher: &idTokenRefresher{ + client: oClient, + cfg: cfg, + persister: persister, + }, + }, nil +} + +type oidcAuthProvider struct { + refresher *idTokenRefresher + initialIDToken jose.JWT +} + +func (g *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { + at := &oidc.AuthenticatedTransport{ + TokenRefresher: g.refresher, + RoundTripper: rt, + } + at.SetJWT(g.initialIDToken) + return &roundTripper{ + wrapped: at, + refresher: g.refresher, + } +} + +func (g *oidcAuthProvider) Login() error { + return errors.New("not yet implemented") +} + +type OIDCClient interface { + refreshToken(rt string) (oauth2.TokenResponse, error) + verifyJWT(jwt jose.JWT) error +} + +type roundTripper struct { + refresher *idTokenRefresher + 
wrapped *oidc.AuthenticatedTransport +} + +func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var res *http.Response + var err error + firstTime := true + wait.ExponentialBackoff(backoff, func() (bool, error) { + if !firstTime { + var jwt jose.JWT + jwt, err = r.refresher.Refresh() + if err != nil { + return true, nil + } + r.wrapped.SetJWT(jwt) + } else { + firstTime = false + } + + res, err = r.wrapped.RoundTrip(req) + if err != nil { + return true, nil + } + if res.StatusCode == http.StatusUnauthorized { + return false, nil + } + return true, nil + }) + return res, err +} + +type idTokenRefresher struct { + cfg map[string]string + client OIDCClient + persister restclient.AuthProviderConfigPersister + intialIDToken jose.JWT +} + +func (r *idTokenRefresher) Verify(jwt jose.JWT) error { + claims, err := jwt.Claims() + if err != nil { + return err + } + + now := time.Now() + exp, ok, err := claims.TimeClaim("exp") + switch { + case err != nil: + return fmt.Errorf("failed to parse 'exp' claim: %v", err) + case !ok: + return errors.New("missing required 'exp' claim") + case exp.Before(now): + return fmt.Errorf("token already expired at: %v", exp) + } + + return nil +} + +func (r *idTokenRefresher) Refresh() (jose.JWT, error) { + rt, ok := r.cfg[cfgRefreshToken] + if !ok { + return jose.JWT{}, errors.New("No valid id-token, and cannot refresh without refresh-token") + } + + tokens, err := r.client.refreshToken(rt) + if err != nil { + return jose.JWT{}, fmt.Errorf("could not refresh token: %v", err) + } + jwt, err := jose.ParseJWT(tokens.IDToken) + if err != nil { + return jose.JWT{}, err + } + + if tokens.RefreshToken != "" && tokens.RefreshToken != rt { + r.cfg[cfgRefreshToken] = tokens.RefreshToken + } + r.cfg[cfgIDToken] = jwt.Encode() + + err = r.persister.Persist(r.cfg) + if err != nil { + return jose.JWT{}, fmt.Errorf("could not persist new tokens: %v", err) + } + + return jwt, r.client.verifyJWT(jwt) +} + +type oidcClient struct { + client *oidc.Client +} + +func (o *oidcClient) refreshToken(rt string) (oauth2.TokenResponse, error) { + oac, err := o.client.OAuthClient() + if err != nil { + return oauth2.TokenResponse{}, err + } + + return oac.RequestToken(oauth2.GrantTypeRefreshToken, rt) +} + +func (o *oidcClient) verifyJWT(jwt jose.JWT) error { + return o.client.VerifyJWT(jwt) +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go new file mode 100644 index 00000000..2b422ddd --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plugins + +import ( + // Initialize all known client auth plugins.
+ _ "k8s.io/kubernetes/plugin/pkg/client/auth/gcp" + _ "k8s.io/kubernetes/plugin/pkg/client/auth/oidc" +) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go b/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go new file mode 100644 index 00000000..1d17270e --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. +package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadata(t reflect.Type, jsonField string) (reflect.Type, string, string, error) { + if t.Kind() == reflect.Map { + return t.Elem(), "", "", nil + } + if t.Kind() != reflect.Struct { + return nil, "", "", fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s", + t.Kind().String()) + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. 
+ tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get("patchStrategy") + patchMergeKey := tjf.Tag.Get("patchMergeKey") + return tjf.Type, patchStrategy, patchMergeKey, nil + } + return nil, "", "", fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
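+ // For example, embedding two structs that each declare an untagged field named "Name"
+ // at the same depth leaves no dominant field, so the "name" key is ignored entirely,
+ // matching encoding/json behavior.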
+ if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. 
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go b/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go new file mode 100644 index 00000000..739fd350 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go @@ -0,0 +1,94 @@ +//This package is copied from Go library text/template. +//The original private functions indirect and printableValue +//are exported as public functions. +package template + +import ( + "fmt" + "reflect" +) + +var Indirect = indirect +var PrintableValue = printableValue + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. 
+ } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. + return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} diff --git a/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go b/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go new file mode 100644 index 00000000..27a008b0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go @@ -0,0 +1,599 @@ +//This package is copied from Go library text/template. +//The original private functions eq, ge, gt, le, lt, and ne +//are exported as public functions. +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var Equal = eq +var GreaterEqual = ge +var Greater = gt +var LessEqual = le +var Less = lt +var NotEqual = ne + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. 
+type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// AddFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string) (reflect.Value, bool) { + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. + +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. 
+func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. +// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. +func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. 
+ +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... +func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. +func lt(arg1, arg2 interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + v2 := reflect.ValueOf(arg2) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind, complexKind: + return false, errBadComparisonType + case floatKind: + truth = v1.Float() < v2.Float() + case intKind: + truth = v1.Int() < v2.Int() + case stringKind: + truth = v1.String() < v2.String() + case uintKind: + truth = v1.Uint() < v2.Uint() + default: + panic("invalid kind") + } + } + return truth, nil +} + +// le evaluates the comparison <= b. +func le(arg1, arg2 interface{}) (bool, error) { + // <= is < or ==. 
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
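Editorial note on the JSON field-resolution code earlier in this section: the tag lookups it performs (tjf.Tag.Get("patchStrategy"), tjf.Tag.Get("patchMergeKey"), and the json name/options split in parseTag) are what Kubernetes' strategic-merge-patch machinery appears to rely on. The sketch below is illustrative only; the struct and field names are hypothetical and not part of the vendored files.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// PodSpecLike is a hypothetical struct carrying the kind of tags the vendored
// field code reads: a json name with options, plus patchStrategy/patchMergeKey.
type PodSpecLike struct {
	Containers []string `json:"containers,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	sf, _ := reflect.TypeOf(PodSpecLike{}).FieldByName("Containers")

	// json tag: the name is everything before the first comma, options follow it,
	// mirroring what parseTag/tagOptions.Contains do above.
	jsonTag := sf.Tag.Get("json")
	name := strings.Split(jsonTag, ",")[0]
	fmt.Println("json name:", name) // containers

	// The same struct-tag lookups performed on the matched field.
	fmt.Println("patchStrategy:", sf.Tag.Get("patchStrategy")) // merge
	fmt.Println("patchMergeKey:", sf.Tag.Get("patchMergeKey")) // name
}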
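Editorial note on the funcs.go hunk above: the builtins (and, or, not, index, len, eq, lt, ...) are copies of the text/template builtins, so their semantics can be demonstrated with the upstream package. A minimal sketch, illustrative only and not part of the vendored files:

package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string]interface{}{
		"words": []string{"a", "b", "c"},
		"empty": "",
	}

	// and/or return one of their arguments (not a bool), index walks
	// maps/slices/arrays, and eq accepts multiple candidates after the first.
	const src = "or: {{or .empty \"fallback\"}}\n" +
		"index: {{index .words 1}}\n" +
		"len: {{len .words}}\n" +
		"eq: {{eq 1 1 2}}  lt: {{lt 1 2}}\n"

	tmpl := template.Must(template.New("demo").Parse(src))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

Because or returns its first truthy argument rather than a boolean, {{or .empty "fallback"}} prints the fallback string itself.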
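The point of vendoring this third_party copy rather than using the stdlib directly is the exported aliases declared at the top of each file (Indirect, PrintableValue, Equal, Greater, Less, NotEqual, and friends), which other code in the tree can import. A rough usage sketch, assuming the vendor import path introduced by this patch; it is a hedged illustration, not code from the repository:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/kubernetes/third_party/golang/template"
)

func main() {
	// Equal and Less wrap the eq/lt builtins and report (bool, error).
	same, err := template.Equal(2, 2)
	fmt.Println(same, err) // true <nil>

	before, err := template.Less("apple", "banana")
	fmt.Println(before, err) // true <nil>

	// Indirect unwraps pointers/interfaces; PrintableValue returns a value fmt
	// can print, dereferencing pointers the same way template execution does.
	s := "hello"
	v, isNil := template.Indirect(reflect.ValueOf(&s))
	fmt.Println(v.Interface(), isNil) // hello false

	pv, ok := template.PrintableValue(reflect.ValueOf(&s))
	fmt.Println(pv, ok) // hello true

	// The exported escapers are available as well.
	fmt.Println(template.HTMLEscapeString("<b>&</b>")) // &lt;b&gt;&amp;&lt;/b&gt;
}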