From 92b6678d1b106c150878fb186089df672cecd60f Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 18 Oct 2016 21:38:27 +0530 Subject: [PATCH 01/33] Initial implementation of BuildConfig support for Openshift provider. --- cmd/convert.go | 16 +++--- pkg/app/app.go | 5 ++ pkg/kobject/kobject.go | 1 + pkg/loader/compose/compose.go | 1 + pkg/transformer/kubernetes/k8sutils.go | 3 ++ pkg/transformer/kubernetes/kubernetes.go | 3 ++ pkg/transformer/openshift/openshift.go | 62 ++++++++++++++++++++++++ 7 files changed, 85 insertions(+), 6 deletions(-) diff --git a/cmd/convert.go b/cmd/convert.go index 8b1c5181..f7c23d63 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -26,12 +26,12 @@ import ( ) var ( - ConvertSource, ConvertOut string - ConvertChart, ConvertDeployment, ConvertDaemonSet bool - ConvertReplicationController, ConvertYaml, ConvertStdout bool - ConvertEmptyVols, ConvertDeploymentConfig bool - ConvertReplicas int - ConvertOpt kobject.ConvertOptions + ConvertSource, ConvertOut string + ConvertChart, ConvertDeployment, ConvertDaemonSet bool + ConvertReplicationController, ConvertYaml, ConvertStdout bool + ConvertEmptyVols, ConvertDeploymentConfig, ConvertBuildConfig bool + ConvertReplicas int + ConvertOpt kobject.ConvertOptions ) var ConvertProvider string = GlobalProvider @@ -53,6 +53,7 @@ var convertCmd = &cobra.Command{ CreateD: ConvertDeployment, CreateDS: ConvertDaemonSet, CreateRC: ConvertReplicationController, + CreateBuildConfig: ConvertBuildConfig, CreateDeploymentConfig: ConvertDeploymentConfig, EmptyVols: ConvertEmptyVols, } @@ -84,6 +85,8 @@ func init() { // OpenShift only convertCmd.Flags().BoolVar(&ConvertDeploymentConfig, "deployment-config", true, "Generate an OpenShift deploymentconfig object") convertCmd.Flags().MarkHidden("deployment-config") + convertCmd.Flags().BoolVar(&ConvertBuildConfig, "build-config", false, "Generate an OpenShift buildconfig object") + convertCmd.Flags().MarkHidden("build-config") // Standard between the two convertCmd.Flags().BoolVarP(&ConvertYaml, "yaml", "y", false, "Generate resource files into yaml format") @@ -107,6 +110,7 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} Resource Flags: + --build-config Generate an Openshift build config object -c, --chart Create a Helm chart for converted objects --daemon-set Generate a Kubernetes daemonset object -d, --deployment Generate a Kubernetes deployment object diff --git a/pkg/app/app.go b/pkg/app/app.go index 7fae0c41..fd76d9e6 100644 --- a/pkg/app/app.go +++ b/pkg/app/app.go @@ -28,6 +28,7 @@ import ( _ "k8s.io/kubernetes/pkg/apis/extensions/install" // install OpenShift api + _ "github.com/openshift/origin/pkg/build/api/install" _ "github.com/openshift/origin/pkg/deploy/api/install" _ "github.com/openshift/origin/pkg/image/api/install" _ "github.com/openshift/origin/pkg/route/api/install" @@ -62,6 +63,7 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec // OpenShift specific flags deploymentConfig := cmd.Flags().Lookup("deployment-config").Changed + buildConfig := cmd.Flags().Lookup("build-config").Changed // Kubernetes specific flags chart := cmd.Flags().Lookup("chart").Changed @@ -88,6 +90,9 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec if deploymentConfig { logrus.Fatalf("--deployment-config is an OpenShift only flag") } + if buildConfig { + logrus.Fatalf("--build-config is an Openshift only flag") + } 
} // Standard checks regardless of provider diff --git a/pkg/kobject/kobject.go b/pkg/kobject/kobject.go index 2c4f4340..e5715e37 100644 --- a/pkg/kobject/kobject.go +++ b/pkg/kobject/kobject.go @@ -33,6 +33,7 @@ type ConvertOptions struct { CreateRC bool CreateDS bool CreateDeploymentConfig bool + CreateBuildConfig bool CreateChart bool GenerateYaml bool EmptyVols bool diff --git a/pkg/loader/compose/compose.go b/pkg/loader/compose/compose.go index d73de97a..a8386488 100644 --- a/pkg/loader/compose/compose.go +++ b/pkg/loader/compose/compose.go @@ -270,6 +270,7 @@ func (c *Compose) LoadFile(file string) kobject.KomposeObject { if composeServiceConfig, ok := composeObject.ServiceConfigs.Get(name); ok { serviceConfig := kobject.ServiceConfig{} serviceConfig.Image = composeServiceConfig.Image + serviceConfig.Build = composeServiceConfig.Build.Context serviceConfig.ContainerName = composeServiceConfig.ContainerName serviceConfig.Command = composeServiceConfig.Entrypoint serviceConfig.Args = composeServiceConfig.Command diff --git a/pkg/transformer/kubernetes/k8sutils.go b/pkg/transformer/kubernetes/k8sutils.go index eac71cba..502691a2 100644 --- a/pkg/transformer/kubernetes/k8sutils.go +++ b/pkg/transformer/kubernetes/k8sutils.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/runtime" + buildapi "github.com/openshift/origin/pkg/build/api" deployapi "github.com/openshift/origin/pkg/deploy/api" imageapi "github.com/openshift/origin/pkg/image/api" routeapi "github.com/openshift/origin/pkg/route/api" @@ -211,6 +212,8 @@ func PrintList(objects []runtime.Object, opt kobject.ConvertOptions) error { file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f) case *deployapi.DeploymentConfig: file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f) + case *buildapi.BuildConfig: + file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f) case *imageapi.ImageStream: file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f) case *api.Service: diff --git a/pkg/transformer/kubernetes/kubernetes.go b/pkg/transformer/kubernetes/kubernetes.go index 66fdc34d..eebc03db 100644 --- a/pkg/transformer/kubernetes/kubernetes.go +++ b/pkg/transformer/kubernetes/kubernetes.go @@ -27,6 +27,7 @@ import ( "github.com/fatih/structs" "github.com/kubernetes-incubator/kompose/pkg/kobject" "github.com/kubernetes-incubator/kompose/pkg/transformer" + buildapi "github.com/openshift/origin/pkg/build/api" deployapi "github.com/openshift/origin/pkg/deploy/api" // install kubernetes api @@ -512,6 +513,8 @@ func (k *Kubernetes) UpdateController(obj runtime.Object, updateTemplate func(*a updateTemplate(&p) t.Spec = p.Spec t.ObjectMeta = p.ObjectMeta + case *buildapi.BuildConfig: + updateMeta(&t.ObjectMeta) } } diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 992fa7f6..4a66bfea 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -18,6 +18,7 @@ package openshift import ( "fmt" + "os/exec" "strings" "github.com/kubernetes-incubator/kompose/pkg/kobject" @@ -26,6 +27,7 @@ import ( "github.com/Sirupsen/logrus" "k8s.io/kubernetes/pkg/api" + kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" "k8s.io/kubernetes/pkg/runtime" @@ -35,6 
+37,7 @@ import ( "time" + buildapi "github.com/openshift/origin/pkg/build/api" deployapi "github.com/openshift/origin/pkg/deploy/api" deploymentconfigreaper "github.com/openshift/origin/pkg/deploy/cmd" imageapi "github.com/openshift/origin/pkg/image/api" @@ -72,6 +75,15 @@ func getImageTag(image string) string { } } +// getGitRemote gets git remote URI for the current git repo +func getGitRemote(remote string) string { + out, err := exec.Command("git", "remote", "get-url", remote).Output() + if err != nil { + return "" + } + return string(out) +} + // initImageStream initialize ImageStream object func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) *imageapi.ImageStream { tag := getImageTag(service.Image) @@ -98,6 +110,52 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) return is } +// initBuildConfig initialize Openshifts BuildConfig Object +func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.BuildConfig { + bc := &buildapi.BuildConfig{ + TypeMeta: unversioned.TypeMeta{ + Kind: "BuildConfig", + APIVersion: "v1", + }, + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: buildapi.BuildConfigSpec{ + // Triggers + []buildapi.BuildTriggerPolicy{ + {Type: "ConfigChange"}, + {Type: "ImageChange"}, + }, + // RunPolicy + "serial", + buildapi.CommonSpec{ + Source: buildapi.BuildSource{ + Git: &buildapi.GitBuildSource{ + Ref: "master", + URI: getGitRemote("origin"), + }, + ContextDir: "./", + }, + Strategy: buildapi.BuildStrategy{ + DockerStrategy: &buildapi.DockerBuildStrategy{ + From: &kapi.ObjectReference{ + Kind: "ImageStreamTag", + Name: name + ":from", + }, + }, + }, + Output: buildapi.BuildOutput{ + To: &kapi.ObjectReference{ + Kind: "ImageStreamTag", + Name: name + ":latest", + }, + }, + }, + }, + } + return bc +} + // initDeploymentConfig initialize OpenShifts DeploymentConfig object func (o *OpenShift) initDeploymentConfig(name string, service kobject.ServiceConfig, replicas int) *deployapi.DeploymentConfig { tag := getImageTag(service.Image) @@ -212,6 +270,10 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C objects = append(objects, o.initImageStream(name, service)) } + if opt.CreateBuildConfig && service.Build != "" { + objects = append(objects, initBuildConfig(name, service)) // Openshift BuildConfigs + } + // If ports not provided in configuration we will not make service if o.PortsExist(name, service) { svc := o.CreateService(name, service, objects) From 1215a6366e2b666701ddd40ad2b8cce2e4d3f097 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 18 Oct 2016 19:16:06 +0530 Subject: [PATCH 02/33] Updated Godeps and vendors. 
Added: - github.com/openshift/origin/pkg/build/api/install - github.com/openshift/origin/pkg/build/api/v1 --- .../origin/pkg/build/api/install/install.go | 109 + .../origin/pkg/build/api/v1/conversion.go | 189 + .../origin/pkg/build/api/v1/defaults.go | 57 + .../openshift/origin/pkg/build/api/v1/doc.go | 5 + .../origin/pkg/build/api/v1/generated.pb.go | 9765 +++++++++++++++++ .../origin/pkg/build/api/v1/generated.proto | 776 ++ .../origin/pkg/build/api/v1/register.go | 40 + .../origin/pkg/build/api/v1/swagger_doc.go | 459 + .../origin/pkg/build/api/v1/types.go | 884 ++ .../build/api/v1/zz_generated.conversion.go | 1960 ++++ .../pkg/build/api/v1/zz_generated.deepcopy.go | 1014 ++ 11 files changed, 15258 insertions(+) create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/install/install.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/conversion.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/defaults.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/doc.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/register.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/types.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go diff --git a/vendor/github.com/openshift/origin/pkg/build/api/install/install.go b/vendor/github.com/openshift/origin/pkg/build/api/install/install.go new file mode 100644 index 00000000..0d2b5d0b --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/install/install.go @@ -0,0 +1,109 @@ +package install + +import ( + "fmt" + + "github.com/golang/glog" + + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/apimachinery" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" + + "github.com/openshift/origin/pkg/build/api" + "github.com/openshift/origin/pkg/build/api/v1" +) + +const importPrefix = "github.com/openshift/origin/pkg/build/api" + +var accessor = meta.NewAccessor() + +// availableVersions lists all known external versions for this group from most preferred to least preferred +var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} + +func init() { + registered.RegisterVersions(availableVersions) + externalVersions := []unversioned.GroupVersion{} + for _, v := range availableVersions { + if registered.IsAllowedVersion(v) { + externalVersions = append(externalVersions, v) + } + } + if len(externalVersions) == 0 { + glog.Infof("No version is registered for group %v", api.GroupName) + return + } + + if err := registered.EnableVersions(externalVersions...); err != nil { + panic(err) + } + if err := enableVersions(externalVersions); err != nil { + panic(err) + } +} + +// TODO: enableVersions should be centralized rather than spread in each API +// group. +// We can combine registered.RegisterVersions, registered.EnableVersions and +// registered.RegisterGroup once we have moved enableVersions there. 
+func enableVersions(externalVersions []unversioned.GroupVersion) error { + addVersionsToScheme(externalVersions...) + preferredExternalVersion := externalVersions[0] + + groupMeta := apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + RESTMapper: newRESTMapper(externalVersions), + SelfLinker: runtime.SelfLinker(accessor), + InterfacesFor: interfacesFor, + } + + if err := registered.RegisterGroup(groupMeta); err != nil { + return err + } + kapi.RegisterRESTMapper(groupMeta.RESTMapper) + return nil +} + +func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { + // add the internal version to Scheme + api.AddToScheme(kapi.Scheme) + // add the enabled external versions to Scheme + for _, v := range externalVersions { + if !registered.IsEnabledVersion(v) { + glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) + continue + } + switch v { + case v1.SchemeGroupVersion: + v1.AddToScheme(kapi.Scheme) + + default: + glog.Errorf("Version %s is not known, so it will not be added to the Scheme.", v) + continue + } + } +} + +func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { + rootScoped := sets.NewString() + ignoredKinds := sets.NewString() + return kapi.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) +} + +func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { + switch version { + case v1.SchemeGroupVersion: + return &meta.VersionInterfaces{ + ObjectConvertor: kapi.Scheme, + MetadataAccessor: accessor, + }, nil + + default: + g, _ := registered.Group(api.GroupName) + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) + } +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/conversion.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/conversion.go new file mode 100644 index 00000000..6a7a6a3f --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/conversion.go @@ -0,0 +1,189 @@ +package v1 + +import ( + "k8s.io/kubernetes/pkg/conversion" + "k8s.io/kubernetes/pkg/runtime" + + oapi "github.com/openshift/origin/pkg/api" + newer "github.com/openshift/origin/pkg/build/api" + buildutil "github.com/openshift/origin/pkg/build/util" + imageapi "github.com/openshift/origin/pkg/image/api" +) + +func Convert_v1_BuildConfig_To_api_BuildConfig(in *BuildConfig, out *newer.BuildConfig, s conversion.Scope) error { + if err := autoConvert_v1_BuildConfig_To_api_BuildConfig(in, out, s); err != nil { + return err + } + + newTriggers := []newer.BuildTriggerPolicy{} + // strip off any default imagechange triggers where the buildconfig's + // "from" is not an ImageStreamTag, because those triggers + // will never be invoked. 
+ imageRef := buildutil.GetInputReference(out.Spec.Strategy) + hasIST := imageRef != nil && imageRef.Kind == "ImageStreamTag" + for _, trigger := range out.Spec.Triggers { + if trigger.Type != newer.ImageChangeBuildTriggerType { + newTriggers = append(newTriggers, trigger) + continue + } + if (trigger.ImageChange == nil || trigger.ImageChange.From == nil) && !hasIST { + continue + } + newTriggers = append(newTriggers, trigger) + } + out.Spec.Triggers = newTriggers + return nil +} + +func Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(in *SourceBuildStrategy, out *newer.SourceBuildStrategy, s conversion.Scope) error { + if err := autoConvert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(in, out, s); err != nil { + return err + } + switch in.From.Kind { + case "ImageStream": + out.From.Kind = "ImageStreamTag" + out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "") + } + return nil +} + +func Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(in *DockerBuildStrategy, out *newer.DockerBuildStrategy, s conversion.Scope) error { + if err := autoConvert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(in, out, s); err != nil { + return err + } + if in.From != nil { + switch in.From.Kind { + case "ImageStream": + out.From.Kind = "ImageStreamTag" + out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "") + } + } + return nil +} + +func Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(in *CustomBuildStrategy, out *newer.CustomBuildStrategy, s conversion.Scope) error { + if err := autoConvert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(in, out, s); err != nil { + return err + } + switch in.From.Kind { + case "ImageStream": + out.From.Kind = "ImageStreamTag" + out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "") + } + return nil +} + +func Convert_v1_BuildOutput_To_api_BuildOutput(in *BuildOutput, out *newer.BuildOutput, s conversion.Scope) error { + if err := autoConvert_v1_BuildOutput_To_api_BuildOutput(in, out, s); err != nil { + return err + } + if in.To != nil && (in.To.Kind == "ImageStream" || len(in.To.Kind) == 0) { + out.To.Kind = "ImageStreamTag" + out.To.Name = imageapi.JoinImageStreamTag(in.To.Name, "") + } + return nil +} + +func Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(in *BuildTriggerPolicy, out *newer.BuildTriggerPolicy, s conversion.Scope) error { + if err := autoConvert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(in, out, s); err != nil { + return err + } + + switch in.Type { + case ImageChangeBuildTriggerTypeDeprecated: + out.Type = newer.ImageChangeBuildTriggerType + case GenericWebHookBuildTriggerTypeDeprecated: + out.Type = newer.GenericWebHookBuildTriggerType + case GitHubWebHookBuildTriggerTypeDeprecated: + out.Type = newer.GitHubWebHookBuildTriggerType + } + return nil +} + +func Convert_api_SourceRevision_To_v1_SourceRevision(in *newer.SourceRevision, out *SourceRevision, s conversion.Scope) error { + if err := autoConvert_api_SourceRevision_To_v1_SourceRevision(in, out, s); err != nil { + return err + } + out.Type = BuildSourceGit + return nil +} + +func Convert_api_BuildSource_To_v1_BuildSource(in *newer.BuildSource, out *BuildSource, s conversion.Scope) error { + if err := autoConvert_api_BuildSource_To_v1_BuildSource(in, out, s); err != nil { + return err + } + switch { + // it is legal for a buildsource to have both a git+dockerfile source, but in v1 that was represented + // as type git. 
+ case in.Git != nil: + out.Type = BuildSourceGit + // it is legal for a buildsource to have both a binary+dockerfile source, but in v1 that was represented + // as type binary. + case in.Binary != nil: + out.Type = BuildSourceBinary + case in.Dockerfile != nil: + out.Type = BuildSourceDockerfile + case len(in.Images) > 0: + out.Type = BuildSourceImage + default: + out.Type = BuildSourceNone + } + return nil +} + +func Convert_api_BuildStrategy_To_v1_BuildStrategy(in *newer.BuildStrategy, out *BuildStrategy, s conversion.Scope) error { + if err := autoConvert_api_BuildStrategy_To_v1_BuildStrategy(in, out, s); err != nil { + return err + } + switch { + case in.SourceStrategy != nil: + out.Type = SourceBuildStrategyType + case in.DockerStrategy != nil: + out.Type = DockerBuildStrategyType + case in.CustomStrategy != nil: + out.Type = CustomBuildStrategyType + case in.JenkinsPipelineStrategy != nil: + out.Type = JenkinsPipelineBuildStrategyType + default: + out.Type = "" + } + return nil +} + +func addConversionFuncs(scheme *runtime.Scheme) error { + if err := scheme.AddConversionFuncs( + Convert_v1_BuildConfig_To_api_BuildConfig, + Convert_api_BuildConfig_To_v1_BuildConfig, + Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy, + Convert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy, + Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy, + Convert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy, + Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy, + Convert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy, + Convert_v1_BuildOutput_To_api_BuildOutput, + Convert_api_BuildOutput_To_v1_BuildOutput, + Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy, + Convert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy, + Convert_v1_SourceRevision_To_api_SourceRevision, + Convert_api_SourceRevision_To_v1_SourceRevision, + Convert_v1_BuildSource_To_api_BuildSource, + Convert_api_BuildSource_To_v1_BuildSource, + Convert_v1_BuildStrategy_To_api_BuildStrategy, + Convert_api_BuildStrategy_To_v1_BuildStrategy, + ); err != nil { + return err + } + + if err := scheme.AddFieldLabelConversionFunc("v1", "Build", + oapi.GetFieldLabelConversionFunc(newer.BuildToSelectableFields(&newer.Build{}), map[string]string{"name": "metadata.name"}), + ); err != nil { + return err + } + + if err := scheme.AddFieldLabelConversionFunc("v1", "BuildConfig", + oapi.GetFieldLabelConversionFunc(newer.BuildConfigToSelectableFields(&newer.BuildConfig{}), map[string]string{"name": "metadata.name"}), + ); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/defaults.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/defaults.go new file mode 100644 index 00000000..57527d07 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/defaults.go @@ -0,0 +1,57 @@ +package v1 + +import "k8s.io/kubernetes/pkg/runtime" + +func SetDefaults_BuildConfigSpec(config *BuildConfigSpec) { + if len(config.RunPolicy) == 0 { + config.RunPolicy = BuildRunPolicySerial + } +} + +func SetDefaults_BuildSource(source *BuildSource) { + if (source != nil) && (source.Type == BuildSourceBinary) && (source.Binary == nil) { + source.Binary = &BinaryBuildSource{} + } +} + +func SetDefaults_BuildStrategy(strategy *BuildStrategy) { + if (strategy != nil) && (strategy.Type == DockerBuildStrategyType) && (strategy.DockerStrategy == nil) { + strategy.DockerStrategy = &DockerBuildStrategy{} + } +} + +func SetDefaults_SourceBuildStrategy(obj *SourceBuildStrategy) { + if 
len(obj.From.Kind) == 0 { + obj.From.Kind = "ImageStreamTag" + } +} + +func SetDefaults_DockerBuildStrategy(obj *DockerBuildStrategy) { + if obj.From != nil && len(obj.From.Kind) == 0 { + obj.From.Kind = "ImageStreamTag" + } +} + +func SetDefaults_CustomBuildStrategy(obj *CustomBuildStrategy) { + if len(obj.From.Kind) == 0 { + obj.From.Kind = "ImageStreamTag" + } +} + +func SetDefaults_BuildTriggerPolicy(obj *BuildTriggerPolicy) { + if obj.Type == ImageChangeBuildTriggerType && obj.ImageChange == nil { + obj.ImageChange = &ImageChangeTrigger{} + } +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + SetDefaults_BuildConfigSpec, + SetDefaults_BuildSource, + SetDefaults_BuildStrategy, + SetDefaults_SourceBuildStrategy, + SetDefaults_DockerBuildStrategy, + SetDefaults_CustomBuildStrategy, + SetDefaults_BuildTriggerPolicy, + ) +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/doc.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/doc.go new file mode 100644 index 00000000..dd46d500 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/api + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go new file mode 100644 index 00000000..2cd5d28c --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go @@ -0,0 +1,9765 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/openshift/origin/pkg/build/api/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + github.com/openshift/origin/pkg/build/api/v1/generated.proto + + It has these top-level messages: + BinaryBuildRequestOptions + BinaryBuildSource + Build + BuildConfig + BuildConfigList + BuildConfigSpec + BuildConfigStatus + BuildList + BuildLog + BuildLogOptions + BuildOutput + BuildPostCommitSpec + BuildRequest + BuildSource + BuildSpec + BuildStatus + BuildStrategy + BuildTriggerCause + BuildTriggerPolicy + CommonSpec + CustomBuildStrategy + DockerBuildStrategy + GenericWebHookCause + GenericWebHookEvent + GitBuildSource + GitHubWebHookCause + GitInfo + GitSourceRevision + ImageChangeCause + ImageChangeTrigger + ImageSource + ImageSourcePath + JenkinsPipelineBuildStrategy + SecretBuildSource + SecretSpec + SourceBuildStrategy + SourceControlUser + SourceRevision + WebHookTrigger +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" + +import time "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } +func (*BinaryBuildRequestOptions) ProtoMessage() {} +func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } +func (*BinaryBuildSource) ProtoMessage() {} +func (*BinaryBuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Build) Reset() { *m = Build{} } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *BuildConfig) Reset() { *m = BuildConfig{} } +func (*BuildConfig) ProtoMessage() {} +func (*BuildConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } +func (*BuildConfigList) ProtoMessage() {} +func (*BuildConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } +func (*BuildConfigSpec) ProtoMessage() {} +func (*BuildConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } +func (*BuildConfigStatus) ProtoMessage() {} +func (*BuildConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *BuildList) Reset() { *m = BuildList{} } +func (*BuildList) ProtoMessage() {} +func (*BuildList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *BuildLog) Reset() { *m = BuildLog{} } +func (*BuildLog) ProtoMessage() {} +func (*BuildLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } +func (*BuildLogOptions) ProtoMessage() {} +func (*BuildLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *BuildOutput) Reset() { *m = BuildOutput{} } +func (*BuildOutput) ProtoMessage() {} +func (*BuildOutput) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } +func (*BuildPostCommitSpec) ProtoMessage() {} +func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *BuildRequest) Reset() { *m = BuildRequest{} } +func (*BuildRequest) ProtoMessage() {} +func (*BuildRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *BuildSource) Reset() { *m = BuildSource{} } +func (*BuildSource) ProtoMessage() {} +func (*BuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *BuildSpec) Reset() { *m = BuildSpec{} } +func (*BuildSpec) ProtoMessage() {} +func (*BuildSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } +func (*BuildStrategy) ProtoMessage() {} +func (*BuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } +func (*BuildTriggerCause) ProtoMessage() {} +func 
(*BuildTriggerCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } +func (*BuildTriggerPolicy) ProtoMessage() {} +func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *CommonSpec) Reset() { *m = CommonSpec{} } +func (*CommonSpec) ProtoMessage() {} +func (*CommonSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } +func (*CustomBuildStrategy) ProtoMessage() {} +func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } +func (*DockerBuildStrategy) ProtoMessage() {} +func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } +func (*GenericWebHookCause) ProtoMessage() {} +func (*GenericWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } +func (*GenericWebHookEvent) ProtoMessage() {} +func (*GenericWebHookEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } +func (*GitBuildSource) ProtoMessage() {} +func (*GitBuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } +func (*GitHubWebHookCause) ProtoMessage() {} +func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *GitInfo) Reset() { *m = GitInfo{} } +func (*GitInfo) ProtoMessage() {} +func (*GitInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} } +func (*GitSourceRevision) ProtoMessage() {} +func (*GitSourceRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} } +func (*ImageChangeCause) ProtoMessage() {} +func (*ImageChangeCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} } +func (*ImageChangeTrigger) ProtoMessage() {} +func (*ImageChangeTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} } +func (*ImageSourcePath) ProtoMessage() {} +func (*ImageSourcePath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } + +func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} } +func (*JenkinsPipelineBuildStrategy) ProtoMessage() {} +func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{32} +} + +func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} } +func (*SecretBuildSource) ProtoMessage() {} +func (*SecretBuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *SecretSpec) Reset() { *m = 
SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} } +func (*SourceBuildStrategy) ProtoMessage() {} +func (*SourceBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *SourceControlUser) Reset() { *m = SourceControlUser{} } +func (*SourceControlUser) ProtoMessage() {} +func (*SourceControlUser) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *SourceRevision) Reset() { *m = SourceRevision{} } +func (*SourceRevision) ProtoMessage() {} +func (*SourceRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} } +func (*WebHookTrigger) ProtoMessage() {} +func (*WebHookTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func init() { + proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.origin.pkg.build.api.v1.BinaryBuildRequestOptions") + proto.RegisterType((*BinaryBuildSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.BinaryBuildSource") + proto.RegisterType((*Build)(nil), "github.com.openshift.origin.pkg.build.api.v1.Build") + proto.RegisterType((*BuildConfig)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildConfig") + proto.RegisterType((*BuildConfigList)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildConfigList") + proto.RegisterType((*BuildConfigSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildConfigSpec") + proto.RegisterType((*BuildConfigStatus)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildConfigStatus") + proto.RegisterType((*BuildList)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildList") + proto.RegisterType((*BuildLog)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildLog") + proto.RegisterType((*BuildLogOptions)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildLogOptions") + proto.RegisterType((*BuildOutput)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildOutput") + proto.RegisterType((*BuildPostCommitSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildPostCommitSpec") + proto.RegisterType((*BuildRequest)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildRequest") + proto.RegisterType((*BuildSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildSource") + proto.RegisterType((*BuildSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildSpec") + proto.RegisterType((*BuildStatus)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildStatus") + proto.RegisterType((*BuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildStrategy") + proto.RegisterType((*BuildTriggerCause)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildTriggerCause") + proto.RegisterType((*BuildTriggerPolicy)(nil), "github.com.openshift.origin.pkg.build.api.v1.BuildTriggerPolicy") + proto.RegisterType((*CommonSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.CommonSpec") + proto.RegisterType((*CustomBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.CustomBuildStrategy") + proto.RegisterType((*DockerBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.DockerBuildStrategy") + proto.RegisterType((*GenericWebHookCause)(nil), "github.com.openshift.origin.pkg.build.api.v1.GenericWebHookCause") + proto.RegisterType((*GenericWebHookEvent)(nil), 
"github.com.openshift.origin.pkg.build.api.v1.GenericWebHookEvent") + proto.RegisterType((*GitBuildSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.GitBuildSource") + proto.RegisterType((*GitHubWebHookCause)(nil), "github.com.openshift.origin.pkg.build.api.v1.GitHubWebHookCause") + proto.RegisterType((*GitInfo)(nil), "github.com.openshift.origin.pkg.build.api.v1.GitInfo") + proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.origin.pkg.build.api.v1.GitSourceRevision") + proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageChangeCause") + proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageChangeTrigger") + proto.RegisterType((*ImageSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageSource") + proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageSourcePath") + proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.JenkinsPipelineBuildStrategy") + proto.RegisterType((*SecretBuildSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.SecretBuildSource") + proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.SecretSpec") + proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.SourceBuildStrategy") + proto.RegisterType((*SourceControlUser)(nil), "github.com.openshift.origin.pkg.build.api.v1.SourceControlUser") + proto.RegisterType((*SourceRevision)(nil), "github.com.openshift.origin.pkg.build.api.v1.SourceRevision") + proto.RegisterType((*WebHookTrigger)(nil), "github.com.openshift.origin.pkg.build.api.v1.WebHookTrigger") +} +func (m *BinaryBuildRequestOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BinaryBuildRequestOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.AsFile))) + i += copy(data[i:], m.AsFile) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Commit))) + i += copy(data[i:], m.Commit) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.AuthorName))) + i += copy(data[i:], m.AuthorName) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.AuthorEmail))) + i += copy(data[i:], m.AuthorEmail) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.CommitterName))) + i += copy(data[i:], m.CommitterName) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.CommitterEmail))) + i += copy(data[i:], m.CommitterEmail) + return i, nil +} + +func (m *BinaryBuildSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BinaryBuildSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.AsFile))) + i += copy(data[i:], 
m.AsFile) + return i, nil +} + +func (m *Build) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Build) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n3, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *BuildConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n6, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *BuildConfigList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildConfigList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BuildConfigSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildConfigSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Triggers) > 0 { + for _, msg := range m.Triggers { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RunPolicy))) + i += copy(data[i:], m.RunPolicy) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CommonSpec.Size())) + n9, err := m.CommonSpec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *BuildConfigStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + 
+func (m *BuildConfigStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastVersion)) + return i, nil +} + +func (m *BuildList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BuildLog) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildLog) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *BuildLogOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildLogOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + data[i] = 0x10 + i++ + if m.Follow { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Previous { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SinceSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) + n11, err := m.SinceTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + data[i] = 0x30 + i++ + if m.Timestamps { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.TailLines != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) + } + data[i] = 0x48 + i++ + if m.NoWait { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.Version != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Version)) + } + return i, nil +} + +func (m *BuildOutput) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildOutput) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.To != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.To.Size())) + n12, err := m.To.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.PushSecret != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PushSecret.Size())) + n13, err := m.PushSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *BuildPostCommitSpec) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildPostCommitSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Script))) + i += copy(data[i:], m.Script) + return i, nil +} + +func (m *BuildRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n14, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + if m.Revision != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) + n15, err := m.Revision.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.TriggeredByImage != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TriggeredByImage.Size())) + n16, err := m.TriggeredByImage.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.From != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n17, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Binary != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Binary.Size())) + n18, err := m.Binary.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.LastVersion != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.LastVersion)) + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.TriggeredBy) > 0 { + for _, msg := range m.TriggeredBy { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BuildSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Binary != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Binary.Size())) + n19, err := m.Binary.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Dockerfile != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.Dockerfile))) + i += copy(data[i:], *m.Dockerfile) + } + 
if m.Git != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Git.Size())) + n20, err := m.Git.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if len(m.Images) > 0 { + for _, msg := range m.Images { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContextDir))) + i += copy(data[i:], m.ContextDir) + if m.SourceSecret != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SourceSecret.Size())) + n21, err := m.SourceSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BuildSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.CommonSpec.Size())) + n22, err := m.CommonSpec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + if len(m.TriggeredBy) > 0 { + for _, msg := range m.TriggeredBy { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *BuildStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + data[i] = 0x10 + i++ + if m.Cancelled { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + if m.StartTimestamp != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTimestamp.Size())) + n23, err := m.StartTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.CompletionTimestamp != nil { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTimestamp.Size())) + n24, err := m.CompletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Duration)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OutputDockerImageReference))) + i += copy(data[i:], m.OutputDockerImageReference) + if m.Config != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Config.Size())) + n25, err := m.Config.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *BuildStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err 
:= m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.DockerStrategy != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DockerStrategy.Size())) + n26, err := m.DockerStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.SourceStrategy != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SourceStrategy.Size())) + n27, err := m.SourceStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.CustomStrategy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CustomStrategy.Size())) + n28, err := m.CustomStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.JenkinsPipelineStrategy != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.JenkinsPipelineStrategy.Size())) + n29, err := m.JenkinsPipelineStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *BuildTriggerCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildTriggerCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + if m.GenericWebHook != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.GenericWebHook.Size())) + n30, err := m.GenericWebHook.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.GitHubWebHook != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitHubWebHook.Size())) + n31, err := m.GitHubWebHook.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.ImageChangeBuild != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ImageChangeBuild.Size())) + n32, err := m.ImageChangeBuild.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *BuildTriggerPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *BuildTriggerPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.GitHubWebHook != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitHubWebHook.Size())) + n33, err := m.GitHubWebHook.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.GenericWebHook != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GenericWebHook.Size())) + n34, err := m.GenericWebHook.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.ImageChange != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ImageChange.Size())) + n35, err := m.ImageChange.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *CommonSpec) 
Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CommonSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccount))) + i += copy(data[i:], m.ServiceAccount) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) + n36, err := m.Source.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + if m.Revision != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) + n37, err := m.Revision.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + } + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n38, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Output.Size())) + n39, err := m.Output.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n40, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PostCommit.Size())) + n41, err := m.PostCommit.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + if m.CompletionDeadlineSeconds != nil { + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CompletionDeadlineSeconds)) + } + return i, nil +} + +func (m *CustomBuildStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomBuildStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n42, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + if m.PullSecret != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) + n43, err := m.PullSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x20 + i++ + if m.ExposeDockerSocket { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x28 + i++ + if m.ForcePull { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.BuildAPIVersion))) + i += copy(data[i:], m.BuildAPIVersion) + return i, nil +} + +func (m *DockerBuildStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DockerBuildStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.From != nil { + data[i] = 
0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n44, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.PullSecret != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) + n45, err := m.PullSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + } + data[i] = 0x18 + i++ + if m.NoCache { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x28 + i++ + if m.ForcePull { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DockerfilePath))) + i += copy(data[i:], m.DockerfilePath) + return i, nil +} + +func (m *GenericWebHookCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GenericWebHookCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) + n46, err := m.Revision.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Secret))) + i += copy(data[i:], m.Secret) + return i, nil +} + +func (m *GenericWebHookEvent) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GenericWebHookEvent) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Git != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Git.Size())) + n47, err := m.Git.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *GitBuildSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitBuildSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.URI))) + i += copy(data[i:], m.URI) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Ref))) + i += copy(data[i:], m.Ref) + if m.HTTPProxy != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPProxy))) + i += copy(data[i:], *m.HTTPProxy) + } + if m.HTTPSProxy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPSProxy))) + i += copy(data[i:], *m.HTTPSProxy) + } + return i, nil +} + +func (m *GitHubWebHookCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m 
*GitHubWebHookCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) + n48, err := m.Revision.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Secret))) + i += copy(data[i:], m.Secret) + return i, nil +} + +func (m *GitInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitBuildSource.Size())) + n49, err := m.GitBuildSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitSourceRevision.Size())) + n50, err := m.GitSourceRevision.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + return i, nil +} + +func (m *GitSourceRevision) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitSourceRevision) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Commit))) + i += copy(data[i:], m.Commit) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Author.Size())) + n51, err := m.Author.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Committer.Size())) + n52, err := m.Committer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ImageChangeCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ImageChangeCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) + i += copy(data[i:], m.ImageID) + if m.FromRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FromRef.Size())) + n53, err := m.FromRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + } + return i, nil +} + +func (m *ImageChangeTrigger) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ImageChangeTrigger) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LastTriggeredImageID))) + i += copy(data[i:], m.LastTriggeredImageID) + if m.From != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n54, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n54 + } + return i, nil +} + +func (m *ImageSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ImageSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n55, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + if len(m.Paths) > 0 { + for _, msg := range m.Paths { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.PullSecret != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) + n56, err := m.PullSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + return i, nil +} + +func (m *ImageSourcePath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ImageSourcePath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SourcePath))) + i += copy(data[i:], m.SourcePath) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DestinationDir))) + i += copy(data[i:], m.DestinationDir) + return i, nil +} + +func (m *JenkinsPipelineBuildStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JenkinsPipelineBuildStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.JenkinsfilePath))) + i += copy(data[i:], m.JenkinsfilePath) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Jenkinsfile))) + i += copy(data[i:], m.Jenkinsfile) + return i, nil +} + +func (m *SecretBuildSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretBuildSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n57, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DestinationDir))) + i += copy(data[i:], m.DestinationDir) + return i, nil +} + +func (m *SecretSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretSource.Size())) + n58, err := m.SecretSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) + i += copy(data[i:], m.MountPath) + return i, nil +} + +func (m *SourceBuildStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SourceBuildStrategy) 
MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.From.Size())) + n59, err := m.From.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + if m.PullSecret != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) + n60, err := m.PullSecret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Scripts))) + i += copy(data[i:], m.Scripts) + if m.Incremental != nil { + data[i] = 0x28 + i++ + if *m.Incremental { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x30 + i++ + if m.ForcePull { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RuntimeImage != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RuntimeImage.Size())) + n61, err := m.RuntimeImage.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + } + if len(m.RuntimeArtifacts) > 0 { + for _, msg := range m.RuntimeArtifacts { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SourceControlUser) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SourceControlUser) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Email))) + i += copy(data[i:], m.Email) + return i, nil +} + +func (m *SourceRevision) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SourceRevision) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Git != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Git.Size())) + n62, err := m.Git.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + } + return i, nil +} + +func (m *WebHookTrigger) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WebHookTrigger) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Secret))) + i += copy(data[i:], m.Secret) + data[i] = 0x10 + i++ + if m.AllowEnv { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + 
data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *BinaryBuildRequestOptions) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorEmail) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterEmail) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BinaryBuildSource) Size() (n int) { + var l int + _ = l + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Build) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfig) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfigList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildConfigSpec) Size() (n int) { + var l int + _ = l + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RunPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfigStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LastVersion)) + return n +} + +func (m *BuildList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildLog) Size() (n int) { + var l int + _ = l + return n +} + +func (m *BuildLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + return n +} + +func (m *BuildOutput) Size() (n int) { + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PushSecret != nil { + l = m.PushSecret.Size() + n += 1 + l + 
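// Illustrative sketch, not part of the vendored generated file or this patch.
// The generated MarshalTo methods above hand-roll the protobuf wire format:
// each field starts with a key byte (fieldNumber<<3 | wireType; for the small
// field numbers used here the key fits in one byte, e.g. 0xa is field 1 with
// wire type 2), followed, for length-delimited fields, by a varint length
// written by encodeVarintGenerated. The standalone example below shows where
// those literal key bytes come from; fieldKey is a hypothetical helper name.

package main

import (
	"encoding/binary"
	"fmt"
)

// fieldKey mirrors how the generated code derives its literal key bytes:
// key = fieldNumber<<3 | wireType (wire type 2 = length-delimited, 0 = varint).
func fieldKey(fieldNumber, wireType uint64) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// Field 1, length-delimited -> 0x0a, matching `data[i] = 0xa` above.
	fmt.Printf("%#x\n", fieldKey(1, 2))
	// Field 2, varint (bool/int) -> 0x10, matching `data[i] = 0x10` above.
	fmt.Printf("%#x\n", fieldKey(2, 0))

	// encodeVarintGenerated emits the same base-128 varint as the standard
	// library's PutUvarint: low 7 bits first, high bit set on continuation.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Println(n, buf[:n]) // 2 [172 2]
}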
sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildPostCommitSpec) Size() (n int) { + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TriggeredByImage != nil { + l = m.TriggeredByImage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastVersion != nil { + n += 1 + sovGenerated(uint64(*m.LastVersion)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSource) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Dockerfile != nil { + l = len(*m.Dockerfile) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContextDir) + n += 1 + l + sovGenerated(uint64(l)) + if m.SourceSecret != nil { + l = m.SourceSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSpec) Size() (n int) { + var l int + _ = l + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTimestamp != nil { + l = m.CompletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Duration)) + l = len(m.OutputDockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.DockerStrategy != nil { + l = m.DockerStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategy != nil { + l = m.SourceStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CustomStrategy != nil { + l = m.CustomStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JenkinsPipelineStrategy != nil { + l = m.JenkinsPipelineStrategy.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerCause) Size() (n int) { + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChangeBuild != nil { + l = m.ImageChangeBuild.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerPolicy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChange != nil { + l = m.ImageChange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonSpec) Size() (n int) { + var l int + _ = l + l = len(m.ServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.PostCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CompletionDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) + } + return n +} + +func (m *CustomBuildStrategy) Size() (n int) { + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.BuildAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DockerBuildStrategy) Size() (n int) { + var l int + _ = l + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.DockerfilePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookCause) Size() (n int) { + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookEvent) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GitBuildSource) Size() (n int) { + var l int + _ = l + l = len(m.URI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + 
return n +} + +func (m *GitHubWebHookCause) Size() (n int) { + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitInfo) Size() (n int) { + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitSourceRevision) Size() (n int) { + var l int + _ = l + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Author.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Committer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageChangeCause) Size() (n int) { + var l int + _ = l + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.FromRef != nil { + l = m.FromRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageChangeTrigger) Size() (n int) { + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageSource) Size() (n int) { + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageSourcePath) Size() (n int) { + var l int + _ = l + l = len(m.SourcePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JenkinsPipelineBuildStrategy) Size() (n int) { + var l int + _ = l + l = len(m.JenkinsfilePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Jenkinsfile) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretBuildSource) Size() (n int) { + var l int + _ = l + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + var l int + _ = l + l = m.SecretSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceBuildStrategy) Size() (n int) { + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Scripts) + n += 1 + l + sovGenerated(uint64(l)) + if m.Incremental != nil { + n += 2 + } + n += 2 + if m.RuntimeImage != nil { + l = m.RuntimeImage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.RuntimeArtifacts) > 0 { + for _, e := range m.RuntimeArtifacts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SourceControlUser) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Email) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceRevision) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + 
return n +} + +func (m *WebHookTrigger) Size() (n int) { + var l int + _ = l + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BinaryBuildRequestOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildRequestOptions{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`, + `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`, + `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`, + `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`, + `}`, + }, "") + return s +} +func (this *BinaryBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildSource{`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `}`, + }, "") + return s +} +func (this *Build) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Build{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfigSpec{`, + `Triggers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Triggers), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + `,`, + `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfigStatus{`, 
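// Illustrative sketch, not part of the vendored generated file or this patch.
// The Size() methods above account for each length-delimited field as
// key byte + varint(length) + payload, which is where the recurring
// `n += 1 + l + sovGenerated(uint64(l))` comes from. A minimal standalone
// check of that arithmetic, using a local helper sov with the same loop as
// sovGenerated above:

package main

import "fmt"

// sov mirrors sovGenerated: the byte count of x encoded as a base-128 varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	secret := "webhook-secret-value" // 20 bytes, stands in for any string field
	l := len(secret)
	// One key byte, a varint holding the length, then the bytes themselves.
	encodedSize := 1 + sov(uint64(l)) + l
	fmt.Println(encodedSize) // 22
}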
+ `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`, + `}`, + }, "") + return s +} +func (this *BuildList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Build", "Build", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLog{`, + `}`, + }, "") + return s +} +func (this *BuildLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *BuildOutput) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildOutput{`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildPostCommitSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildPostCommitSpec{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *BuildRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(fmt.Sprintf("%v", this.Revision), "SourceRevision", "SourceRevision", 1) + `,`, + `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `Binary:` + strings.Replace(fmt.Sprintf("%v", this.Binary), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TriggeredBy), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSource) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&BuildSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Binary:` + strings.Replace(fmt.Sprintf("%v", this.Binary), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`, + `Git:` + strings.Replace(fmt.Sprintf("%v", this.Git), "GitBuildSource", "GitBuildSource", 1) + `,`, + `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ImageSource", "ImageSource", 1), `&`, ``, 1) + `,`, + `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`, + `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildSpec{`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TriggeredBy), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerStrategy:` + strings.Replace(fmt.Sprintf("%v", this.DockerStrategy), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`, + `SourceStrategy:` + strings.Replace(fmt.Sprintf("%v", this.SourceStrategy), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`, + `CustomStrategy:` + strings.Replace(fmt.Sprintf("%v", this.CustomStrategy), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`, + `JenkinsPipelineStrategy:` + strings.Replace(fmt.Sprintf("%v", this.JenkinsPipelineStrategy), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerCause{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `GenericWebHook:` + strings.Replace(fmt.Sprintf("%v", this.GenericWebHook), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`, + `GitHubWebHook:` + strings.Replace(fmt.Sprintf("%v", this.GitHubWebHook), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`, + 
`ImageChangeBuild:` + strings.Replace(fmt.Sprintf("%v", this.ImageChangeBuild), "ImageChangeCause", "ImageChangeCause", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GitHubWebHook:` + strings.Replace(fmt.Sprintf("%v", this.GitHubWebHook), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `GenericWebHook:` + strings.Replace(fmt.Sprintf("%v", this.GenericWebHook), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `ImageChange:` + strings.Replace(fmt.Sprintf("%v", this.ImageChange), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonSpec{`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(fmt.Sprintf("%v", this.Revision), "SourceRevision", "SourceRevision", 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "k8s_io_kubernetes_pkg_api_v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, + `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *CustomBuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + `,`, + `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *DockerBuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DockerBuildStrategy{`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `DockerfilePath:` + 
fmt.Sprintf("%v", this.DockerfilePath) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookCause{`, + `Revision:` + strings.Replace(fmt.Sprintf("%v", this.Revision), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(fmt.Sprintf("%v", this.Git), "GitInfo", "GitInfo", 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitBuildSource{`, + `URI:` + fmt.Sprintf("%v", this.URI) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `}`, + }, "") + return s +} +func (this *GitHubWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitHubWebHookCause{`, + `Revision:` + strings.Replace(fmt.Sprintf("%v", this.Revision), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GitInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitSourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitSourceRevision{`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeCause{`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTrigger{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSource{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", 
"k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSourcePath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSourcePath{`, + `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *JenkinsPipelineBuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`, + `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`, + `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretBuildSource{`, + `Secret:` + strings.Replace(strings.Replace(this.Secret.String(), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `SecretSource:` + strings.Replace(strings.Replace(this.SecretSource.String(), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `}`, + }, "") + return s +} +func (this *SourceBuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `RuntimeImage:` + strings.Replace(fmt.Sprintf("%v", this.RuntimeImage), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, + `RuntimeArtifacts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuntimeArtifacts), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceControlUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceControlUser{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `}`, + }, "") + return s +} +func (this *SourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceRevision{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(fmt.Sprintf("%v", this.Git), "GitSourceRevision", "GitSourceRevision", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebHookTrigger) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebHookTrigger{`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BinaryBuildRequestOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorEmail = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterEmail = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinaryBuildSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
BinaryBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Build) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Build: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BuildConfig{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Triggers = append(m.Triggers, BuildTriggerPolicy{}) + if err := m.Triggers[len(m.Triggers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
RunPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunPolicy = BuildRunPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType) + } + m.LastVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.LastVersion |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Build{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLog) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLogOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOutput) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.To.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PushSecret == nil { + m.PushSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.PushSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildPostCommitSpec) Unmarshal(data []byte) error { + l := 
len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Script = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TriggeredByImage == nil { + m.TriggeredByImage = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.TriggeredByImage.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
LastVersion", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LastVersion = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Dockerfile = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitBuildSource{} + } + if err := m.Git.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageSource{}) + if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContextDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceSecret == nil { + m.SourceSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.SourceSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, SecretBuildSource{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = BuildPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancelled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTimestamp == nil { + m.StartTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.StartTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTimestamp == nil { + m.CompletionTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{} + } + if err := m.CompletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutputDockerImageReference = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.Config.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategy == nil { + m.DockerStrategy = &DockerBuildStrategy{} + } + if err := m.DockerStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceStrategy == nil { + m.SourceStrategy = &SourceBuildStrategy{} + } + if err := m.SourceStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CustomStrategy == nil { + m.CustomStrategy = &CustomBuildStrategy{} + } + if err := m.CustomStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JenkinsPipelineStrategy == nil { + m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{} + } + if err := m.JenkinsPipelineStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &GenericWebHookCause{} + } + if err := m.GenericWebHook.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &GitHubWebHookCause{} + } + if err := m.GitHubWebHook.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeBuild == nil { + m.ImageChangeBuild = &ImageChangeCause{} + } + if err := m.ImageChangeBuild.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildTriggerPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildTriggerType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &WebHookTrigger{} + } + if err := m.GitHubWebHook.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &WebHookTrigger{} + } + if err := m.GenericWebHook.Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChange == nil { + m.ImageChange = &ImageChangeTrigger{} + } + if err := m.ImageChange.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommonSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccount = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil 
{ + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PostCommit.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CompletionDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomBuildStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExposeDockerSocket = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, SecretSpec{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildAPIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DockerBuildStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := 
m.PullSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCache = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerfilePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericWebHookEvent) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitInfo{} + } + if err := m.Git.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitBuildSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URI = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitHubWebHookCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitBuildSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitSourceRevision) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Author.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Committer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FromRef == nil { + m.FromRef = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.FromRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageChangeTrigger) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTriggeredImageID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ImageSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, ImageSourcePath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSourcePath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JenkinsPipelineBuildStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JenkinsfilePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Jenkinsfile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBuildSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceBuildStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullSecret == nil { + m.PullSecret = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} + } + if err := m.PullSecret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scripts = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeImage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeImage == nil { + m.RuntimeImage = &k8s_io_kubernetes_pkg_api_v1.ObjectReference{} + } + if err := m.RuntimeImage.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeArtifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeArtifacts = append(m.RuntimeArtifacts, ImageSourcePath{}) + if err := m.RuntimeArtifacts[len(m.RuntimeArtifacts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*SourceControlUser) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceRevision) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
BuildSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitSourceRevision{} + } + if err := m.Git.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebHookTrigger) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEnv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 3054 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 0xce, 0x92, 0x14, 0x45, 0x0d, 0xf5, 0x3b, 0x72, 0x62, 0x59, 0x49, 0x1c, 0x67, 0x93, 0x16, + 0x29, 0x62, 0x93, 0x90, 0x12, 0xa7, 0x0e, 0x12, 0xbb, 0x16, 0x29, 0xff, 0x48, 0x95, 0x6d, 0xf5, + 0x49, 0x89, 0x53, 0x17, 0x6d, 0xb1, 0xa2, 0x86, 0xd4, 0x5a, 0xe4, 0x2e, 0xbb, 0xbb, 0x54, 0x2c, + 0xa0, 0x05, 0x82, 0x16, 0x05, 0xd2, 0x5b, 0xd1, 0xf6, 0xd0, 0x4b, 0x5b, 0x04, 0xe8, 0xcf, 0x21, + 0x87, 0xa2, 0x3f, 0x87, 0x00, 0x3d, 0x15, 0xe8, 0xc1, 0x47, 0x1f, 0x7b, 0x0a, 0x9a, 0xe4, 0xd0, + 0x73, 0xaf, 0x39, 0x75, 0x7e, 0xde, 0xee, 0xce, 0x2e, 0x29, 0xc5, 0x5c, 0x59, 0x46, 0x81, 0x1e, + 0x28, 0x68, 0xdf, 0xbc, 0xf9, 0xde, 0xcc, 0x9b, 0x37, 0xef, 0x6f, 0x97, 0xbc, 0xd9, 0xb2, 0x83, + 0x9d, 0xde, 0x56, 0xa5, 0xe1, 0x76, 0xaa, 0x6e, 0x97, 0x39, 0xfe, 0x8e, 0xdd, 0x0c, 0xaa, 0xae, + 0x67, 0xb7, 0x6c, 0xa7, 0xda, 0xdd, 0x6d, 0x55, 0xb7, 0x7a, 0x76, 0x7b, 0xbb, 0x6a, 0x75, 0xed, + 0xea, 0xde, 0x42, 0xb5, 0xc5, 0x1c, 0xe6, 0x59, 0x01, 0xdb, 0xae, 0x74, 0x3d, 0x37, 0x70, 0xe9, + 0xd9, 0x78, 0x76, 0x25, 0x9a, 0x5d, 0x51, 0xb3, 0x2b, 0x7c, 0x76, 0x45, 0xce, 0xae, 0xf0, 0xd9, + 0x95, 0xbd, 0x85, 0xf9, 0x73, 0x9a, 0xac, 0x96, 0xdb, 0x72, 0xab, 0x12, 0x64, 0xab, 0xd7, 0x94, + 0x4f, 0xf2, 0x41, 0xfe, 0xa7, 0xc0, 0xe7, 0xcf, 0xef, 0x5e, 0xf0, 0x2b, 0xb6, 0x5b, 0xdd, 0xed, + 0x6d, 0x31, 0xcf, 0x61, 0x01, 0xf3, 0xe5, 0x82, 0xc4, 0x52, 0x7a, 0xce, 0x1e, 0xf3, 0x7c, 0xdb, + 0x75, 0xd8, 0x76, 0x7a, 0x4d, 0xf3, 0x67, 0x0f, 0x9e, 0xd6, 0xbf, 0x83, 0xf9, 0x73, 0x83, 0xb9, + 0xbd, 0x9e, 0x13, 0xd8, 0x1d, 0xd6, 0xc7, 0xbe, 0x30, 0x98, 0xbd, 0x17, 0xd8, 0xed, 0xaa, 0xed, + 0x04, 0x7e, 0xe0, 0xa5, 0xa7, 0x98, 0x1f, 0x15, 0xc8, 0xa9, 0x9a, 0xed, 0x58, 0xde, 0x7e, 0x4d, + 0x28, 0x03, 0xd8, 0xf7, 0x7a, 0xcc, 0x0f, 0x6e, 0x75, 0x03, 0xbe, 0x7c, 0x9f, 0xbe, 0x43, 0x4a, + 0x1d, 0x16, 0x58, 0xdb, 0x56, 0x60, 0xcd, 0x19, 0x67, 0x8c, 0x97, 0xca, 0x8b, 0x2f, 0x55, 0x94, + 0x8c, 0x4a, 
0x2c, 0x43, 0xaa, 0x52, 0x29, 0xb1, 0x72, 0x6b, 0xeb, 0x2e, 0x6b, 0x04, 0x37, 0xf8, + 0x9c, 0x1a, 0xbd, 0xff, 0xf1, 0x73, 0x4f, 0x7c, 0xfa, 0xf1, 0x73, 0x24, 0xa6, 0x41, 0x84, 0x46, + 0xbf, 0x4c, 0x8a, 0x96, 0x7f, 0xd5, 0x6e, 0xb3, 0xb9, 0x1c, 0xc7, 0x1d, 0xab, 0x4d, 0x22, 0x77, + 0x71, 0x49, 0x52, 0x01, 0x47, 0xe9, 0x6b, 0x64, 0xd2, 0x63, 0x7b, 0xb6, 0xd0, 0x66, 0xdd, 0xed, + 0x74, 0xec, 0x60, 0x2e, 0x9f, 0xe4, 0x57, 0x54, 0x48, 0x71, 0xd1, 0xd7, 0xc9, 0x54, 0x48, 0xb9, + 0xc1, 0x7c, 0xdf, 0x6a, 0xb1, 0xb9, 0x82, 0x9c, 0x38, 0x85, 0x13, 0x47, 0x91, 0x0c, 0x69, 0x3e, + 0x5a, 0x23, 0x34, 0x24, 0x2d, 0xf5, 0x82, 0x1d, 0xd7, 0xbb, 0x69, 0x75, 0xd8, 0xdc, 0x88, 0x9c, + 0x1d, 0x6d, 0x2a, 0x1e, 0x81, 0x01, 0xdc, 0xf4, 0x0a, 0x99, 0x4d, 0x52, 0xaf, 0x74, 0x2c, 0xbb, + 0x3d, 0x57, 0x94, 0x20, 0xb3, 0x08, 0x52, 0xd6, 0x86, 0x60, 0x10, 0x3f, 0xfd, 0x3a, 0x79, 0x32, + 0xb9, 0xaf, 0x80, 0xa9, 0xd5, 0x8c, 0x4a, 0xa0, 0x27, 0x11, 0x68, 0x22, 0x31, 0x08, 0x83, 0xe7, + 0xd0, 0x9b, 0xe4, 0xa9, 0xbe, 0x01, 0xb5, 0xac, 0x92, 0x44, 0x7b, 0x0a, 0xd1, 0x26, 0x93, 0xa3, + 0x70, 0xc0, 0x2c, 0xf3, 0x0d, 0x32, 0xa3, 0x59, 0xce, 0x86, 0xdb, 0xf3, 0x1a, 0x4c, 0x3b, 0x57, + 0xe3, 0xb0, 0x73, 0x35, 0x7f, 0x93, 0x23, 0x23, 0x72, 0xde, 0x31, 0xda, 0xd8, 0x37, 0x49, 0xc1, + 0xef, 0xb2, 0x86, 0xb4, 0xb0, 0xf2, 0xe2, 0x57, 0x2b, 0xc3, 0xb8, 0x83, 0x8a, 0xda, 0x14, 0x9f, + 0x5e, 0x1b, 0x47, 0x21, 0x05, 0xf1, 0x04, 0x12, 0x92, 0x5a, 0xa4, 0xe8, 0x07, 0x56, 0xd0, 0xf3, + 0xa5, 0x39, 0x96, 0x17, 0x5f, 0xcf, 0x02, 0x2e, 0x01, 0x62, 0x0d, 0xa9, 0x67, 0x40, 0x60, 0xf3, + 0x4f, 0x39, 0x52, 0x96, 0x7c, 0x75, 0xd7, 0x69, 0xda, 0xad, 0x63, 0xd4, 0xd3, 0x77, 0x13, 0x7a, + 0xba, 0x98, 0x61, 0x2b, 0x6a, 0x89, 0x07, 0x6a, 0xab, 0x95, 0xd2, 0xd6, 0xd7, 0xb2, 0x8b, 0x38, + 0x5c, 0x67, 0x0f, 0x0c, 0x32, 0xa5, 0x71, 0xaf, 0xd9, 0x7e, 0x40, 0xbf, 0xdd, 0xa7, 0xb7, 0xea, + 0x21, 0x7a, 0xd3, 0x7c, 0x77, 0x45, 0x4c, 0x97, 0xea, 0x9b, 0x46, 0x71, 0xa5, 0x90, 0xa2, 0x29, + 0xef, 0x3b, 0x64, 0xc4, 0x0e, 0x58, 0xc7, 0xe7, 0xda, 0xcb, 0x67, 0x34, 0x04, 0xb5, 0xd8, 0xda, + 0x04, 0x4a, 0x19, 0x59, 0x11, 0x78, 0xa0, 0x60, 0xcd, 0x3f, 0xe7, 0x12, 0x5b, 0x12, 0x5a, 0xa5, + 0x0e, 0x29, 0x05, 0x1c, 0xb0, 0xc5, 0x57, 0xca, 0xb7, 0x24, 0xc4, 0x5e, 0xce, 0x20, 0x76, 0x53, + 0x41, 0xac, 0xbb, 0x6d, 0xbb, 0xb1, 0x1f, 0xef, 0x11, 0xc9, 0x3e, 0x44, 0x32, 0xe8, 0x12, 0x19, + 0xe3, 0x21, 0x47, 0x31, 0xa2, 0xbf, 0x7e, 0x01, 0xd9, 0xc7, 0x20, 0x1c, 0xf8, 0x9c, 0x7b, 0x0e, + 0x15, 0x43, 0x42, 0x0a, 0xc4, 0xb3, 0x68, 0x9b, 0x10, 0xbe, 0xb4, 0x8e, 0xeb, 0x88, 0x0d, 0xa0, + 0x19, 0x5c, 0x18, 0x6e, 0xd1, 0xf5, 0x68, 0x7e, 0x6c, 0xcf, 0x31, 0x0d, 0x34, 0x7c, 0x73, 0x95, + 0xbb, 0xa6, 0xb4, 0xd1, 0xd0, 0xf3, 0xa4, 0xdc, 0xb6, 0xfc, 0xe0, 0x6d, 0x75, 0xbe, 0xd2, 0x16, + 0xf2, 0xb1, 0x2f, 0x5e, 0x8b, 0x87, 0x40, 0xe7, 0x33, 0xff, 0x61, 0x90, 0x31, 0x09, 0xf6, 0x38, + 0xac, 0xe9, 0x9d, 0xa4, 0x35, 0xbd, 0x92, 0xe1, 0x58, 0x0f, 0xb0, 0x23, 0x42, 0x4a, 0x6a, 0x17, + 0x6e, 0xcb, 0x7c, 0xbf, 0x80, 0x36, 0xc5, 0x1f, 0xc2, 0x50, 0x5f, 0x25, 0x63, 0x0d, 0xd7, 0x09, + 0x2c, 0x9b, 0xe7, 0x07, 0xe8, 0xbb, 0x67, 0xc2, 0x33, 0xae, 0x87, 0x03, 0x10, 0xf3, 0x08, 0x4f, + 0xdf, 0x74, 0xdb, 0x6d, 0xf7, 0x5d, 0x69, 0x11, 0xa5, 0xf8, 0x4e, 0x5e, 0x95, 0x54, 0xc0, 0x51, + 0x7a, 0x96, 0x94, 0xba, 0x22, 0x82, 0xb8, 0x78, 0xfd, 0x4b, 0xb1, 0x02, 0xd6, 0x91, 0x0e, 0x11, + 0x07, 0x7d, 0x95, 0x8c, 0xfb, 0xb6, 0xd3, 0x60, 0x1b, 0x8c, 0x4b, 0xda, 0xf6, 0x65, 0xd0, 0xce, + 0xd7, 0xa6, 0x39, 0xf7, 0xf8, 0x86, 0x46, 0x87, 0x04, 0x17, 0x57, 0xdb, 0x98, 0x7c, 0xde, 0xb4, + 0x31, 0x52, 0x97, 0x17, 0x5f, 0x7e, 
0xc8, 0x63, 0x11, 0x53, 0x6a, 0x13, 0x62, 0x97, 0x1b, 0x21, + 0x02, 0xc4, 0x60, 0x74, 0x91, 0x10, 0x91, 0x6a, 0x71, 0xff, 0xd2, 0xe9, 0xfa, 0x32, 0x7e, 0x97, + 0x62, 0xeb, 0xdb, 0x8c, 0x46, 0x40, 0xe3, 0xa2, 0x2f, 0x93, 0x31, 0xae, 0xa3, 0xf6, 0x1a, 0x57, + 0x93, 0x2f, 0x23, 0x75, 0x5e, 0x09, 0xd8, 0x0c, 0x89, 0x10, 0x8f, 0xd3, 0x0a, 0x21, 0x6d, 0x9b, + 0x87, 0xd5, 0xda, 0x3e, 0x5f, 0xa1, 0x8c, 0xc4, 0xf9, 0xda, 0xa4, 0x00, 0x5f, 0x8b, 0xa8, 0xa0, + 0x71, 0x08, 0xb5, 0x3b, 0xee, 0xbb, 0x16, 0x4f, 0x84, 0xc6, 0x92, 0x6a, 0xbf, 0xe9, 0xde, 0xe6, + 0x54, 0xc0, 0x51, 0xfa, 0x25, 0x32, 0x8a, 0x9b, 0x9c, 0x23, 0x12, 0xb4, 0x2c, 0x92, 0x9e, 0xd0, + 0xc2, 0xc3, 0x31, 0xf3, 0x23, 0x03, 0xa3, 0xcc, 0xad, 0x5e, 0xd0, 0xed, 0x05, 0x3c, 0x71, 0xc9, + 0x05, 0x2e, 0x5a, 0xf6, 0xb9, 0x87, 0x89, 0x2f, 0xc0, 0x9a, 0xcc, 0x63, 0x5c, 0x5d, 0xb5, 0x22, + 0x17, 0x90, 0xdb, 0x74, 0x81, 0x03, 0xd0, 0x2d, 0x42, 0xba, 0x3d, 0x7f, 0x87, 0x9f, 0x8f, 0xc7, + 0x02, 0x0c, 0x2c, 0x8b, 0x87, 0xc3, 0xad, 0xb9, 0x0d, 0xab, 0x9d, 0xc6, 0x94, 0x9a, 0x58, 0x8f, + 0x90, 0x40, 0x43, 0x35, 0x7f, 0x68, 0x90, 0x59, 0xb9, 0xf4, 0x75, 0xd7, 0x0f, 0x54, 0x6e, 0x22, + 0xbd, 0x23, 0xdf, 0xb9, 0x70, 0x05, 0x96, 0xb3, 0x2d, 0x9d, 0xe3, 0x98, 0xda, 0x79, 0x5d, 0x91, + 0x20, 0x1c, 0xa3, 0xcf, 0x90, 0x82, 0xe5, 0xb5, 0xd4, 0x4d, 0x1b, 0xab, 0x95, 0x44, 0xc8, 0x5a, + 0xe2, 0xcf, 0x20, 0xa9, 0x42, 0xcd, 0x7e, 0xc3, 0xb3, 0xbb, 0x7d, 0xf9, 0xe6, 0x86, 0xa4, 0x02, + 0x8e, 0x9a, 0x9f, 0x8d, 0x90, 0x71, 0x3d, 0x73, 0x3e, 0xc6, 0x30, 0xdd, 0x24, 0xa5, 0x30, 0x13, + 0x43, 0x8d, 0xbe, 0x39, 0x9c, 0x7b, 0x50, 0x29, 0x1a, 0x20, 0x46, 0x6d, 0x5c, 0x5c, 0xc1, 0xf0, + 0x09, 0x22, 0x6c, 0xea, 0x92, 0x69, 0xf4, 0xfc, 0x6c, 0xbb, 0xb6, 0xbf, 0xd2, 0x11, 0xb9, 0x73, + 0x3e, 0x8b, 0x41, 0x9c, 0xe0, 0x02, 0xa6, 0x37, 0x53, 0x50, 0xd0, 0x07, 0xce, 0xb3, 0xdc, 0x42, + 0xd3, 0x73, 0x3b, 0xf2, 0xae, 0x0f, 0x2d, 0x44, 0x1e, 0xdc, 0x55, 0x3e, 0x1d, 0x24, 0x08, 0x6d, + 0x90, 0xe2, 0x96, 0xcc, 0x4a, 0xd1, 0x0f, 0x0c, 0x9b, 0x6b, 0xa4, 0x33, 0xda, 0x1a, 0x11, 0xa7, + 0xae, 0xc8, 0x80, 0xd0, 0x74, 0x21, 0x19, 0x4a, 0x8a, 0xf2, 0x82, 0x4d, 0x1d, 0x16, 0x46, 0x68, + 0x9d, 0xe4, 0x99, 0xb3, 0xc7, 0xdd, 0x81, 0xf0, 0xeb, 0x2f, 0x1e, 0xbe, 0xc7, 0x2b, 0xce, 0xde, + 0xdb, 0x96, 0x57, 0x2b, 0xa3, 0x39, 0xe4, 0xf9, 0x33, 0x88, 0xd9, 0x74, 0x8f, 0x94, 0x35, 0xed, + 0x71, 0x6f, 0x91, 0xcf, 0x98, 0x4d, 0xe1, 0xa9, 0xd4, 0xad, 0x9e, 0xcf, 0xe2, 0x18, 0xa8, 0x9d, + 0x15, 0xe8, 0x82, 0xcc, 0x5f, 0x8d, 0xa0, 0x97, 0xc0, 0x2c, 0xff, 0x15, 0x52, 0x08, 0xf6, 0xbb, + 0x61, 0x8e, 0xff, 0x5c, 0x98, 0xf2, 0x6d, 0x72, 0x1a, 0x4f, 0x03, 0xa6, 0x34, 0x56, 0x41, 0x02, + 0xc9, 0xac, 0x9d, 0x4c, 0xee, 0xf8, 0x4e, 0x86, 0xbb, 0xd3, 0x6d, 0xb7, 0xb1, 0xcb, 0xbc, 0xa6, + 0xa8, 0x41, 0xf0, 0xee, 0x8a, 0x2b, 0xb5, 0x1c, 0x51, 0x41, 0xe3, 0xa0, 0xb7, 0x49, 0x9e, 0xaf, + 0x02, 0x4d, 0x6f, 0xc8, 0xfb, 0x74, 0x8d, 0xfb, 0x64, 0x6d, 0x39, 0xa3, 0xe2, 0xa8, 0x38, 0x0d, + 0x04, 0xa2, 0xa8, 0x10, 0x6c, 0x61, 0xdd, 0x3e, 0xb7, 0xc3, 0x0c, 0x89, 0xa1, 0xbc, 0x19, 0x08, + 0x1c, 0xf9, 0x1e, 0x49, 0xe4, 0xd9, 0xae, 0x02, 0x16, 0xb1, 0x49, 0x84, 0x63, 0x76, 0x2f, 0x58, + 0xb6, 0x3d, 0xac, 0x2d, 0xb5, 0xcc, 0x28, 0x1c, 0x01, 0x8d, 0x8b, 0xee, 0xf0, 0xf8, 0x2a, 0x51, + 0xd1, 0x35, 0x8f, 0x66, 0x76, 0xcd, 0x2a, 0x26, 0x6b, 0x58, 0x90, 0x40, 0xa6, 0x77, 0xc9, 0xa8, + 0x2f, 0xff, 0xf3, 0xb3, 0xd9, 0xa9, 0x82, 0xd1, 0x15, 0x1c, 0x95, 0xee, 0x6a, 0xc8, 0x87, 0x50, + 0x80, 0xf9, 0x9f, 0x30, 0x47, 0x93, 0x01, 0x20, 0x99, 0x6b, 0x1a, 0xc7, 0x9b, 0x6b, 0xa6, 0xef, + 0x64, 0xee, 0x71, 0xdd, 0xc9, 0x0f, 0xa3, 0x3b, 0xa9, 0xd2, 
0xdb, 0x05, 0x32, 0xd2, 0xdd, 0xb1, + 0xfc, 0xf0, 0x52, 0x3e, 0x1d, 0x66, 0x81, 0xeb, 0x82, 0xc8, 0x6f, 0x25, 0x51, 0xb1, 0x52, 0x3c, + 0x81, 0xe2, 0x94, 0x39, 0x9f, 0xc5, 0xcf, 0xb2, 0xdd, 0x66, 0xdb, 0x98, 0xc5, 0xc5, 0x39, 0x5f, + 0x38, 0x00, 0x31, 0x0f, 0x7d, 0x8d, 0x14, 0x3d, 0x66, 0xf9, 0xdc, 0xe5, 0xa9, 0x9b, 0x75, 0x3a, + 0xb4, 0x4c, 0x90, 0xd4, 0xcf, 0x85, 0x45, 0xa8, 0x8a, 0x4c, 0x3e, 0x03, 0x72, 0xd3, 0xaf, 0x90, + 0xd1, 0xce, 0xe1, 0x5d, 0x98, 0x70, 0x9c, 0xd7, 0x8a, 0x93, 0x3c, 0x8d, 0xf2, 0x82, 0x28, 0xb7, + 0xca, 0x92, 0xcf, 0x51, 0xd1, 0xc6, 0xd8, 0x48, 0xc0, 0x40, 0x0a, 0x96, 0x9f, 0xdb, 0x2c, 0x3f, + 0x9c, 0x6e, 0x9b, 0x89, 0xfc, 0x37, 0x96, 0x56, 0x1c, 0x5e, 0xda, 0x49, 0x2e, 0x6d, 0xb6, 0xde, + 0x8f, 0x05, 0x83, 0x04, 0xd0, 0x8b, 0xa4, 0xb4, 0xdd, 0xf3, 0x2c, 0x41, 0xc4, 0xe4, 0xf0, 0xf9, + 0x30, 0x1f, 0x5e, 0x46, 0x3a, 0xd7, 0xe3, 0x84, 0xc8, 0x27, 0x2b, 0x21, 0x01, 0xa2, 0x29, 0x3c, + 0xb3, 0x9a, 0x77, 0x65, 0xaa, 0xa6, 0x1c, 0x9a, 0x8a, 0xa9, 0xe1, 0xa5, 0xc4, 0x4e, 0x8e, 0x89, + 0x80, 0xf3, 0xb7, 0x0e, 0xe4, 0x84, 0x43, 0x50, 0xe8, 0x37, 0x48, 0xb1, 0x21, 0x2b, 0x27, 0x99, + 0x63, 0x0e, 0x1d, 0x92, 0x89, 0xea, 0xcb, 0x09, 0x00, 0x40, 0x20, 0xf3, 0xdf, 0x05, 0x32, 0x81, + 0xd6, 0x2a, 0xda, 0x8f, 0xad, 0x7d, 0x5e, 0x8e, 0xe9, 0x31, 0xe4, 0xf9, 0x54, 0x0c, 0x99, 0x49, + 0x30, 0x6b, 0x51, 0xe4, 0x07, 0x64, 0x52, 0xb9, 0xef, 0x70, 0x0c, 0xa3, 0xc9, 0xd2, 0x70, 0x37, + 0x4e, 0xed, 0x3b, 0x21, 0x44, 0x59, 0xcd, 0x72, 0x02, 0x1c, 0x52, 0xc2, 0x84, 0x78, 0xf4, 0x72, + 0xa1, 0xf8, 0x7c, 0x16, 0xf1, 0xe8, 0xd1, 0xfa, 0xc5, 0x6f, 0x24, 0xc0, 0x21, 0x25, 0x4c, 0x88, + 0x6f, 0xf4, 0xfc, 0xc0, 0xed, 0x44, 0xe2, 0x0b, 0x59, 0xc4, 0xd7, 0x25, 0xc6, 0x00, 0xf1, 0xf5, + 0x04, 0x38, 0xa4, 0x84, 0xd1, 0x0f, 0x0c, 0x72, 0xf2, 0x2e, 0x73, 0x76, 0x6d, 0xc7, 0x5f, 0xb7, + 0xbb, 0xac, 0xcd, 0x2b, 0x98, 0x68, 0x21, 0xea, 0x9a, 0xae, 0x0e, 0xb7, 0x90, 0xd5, 0x24, 0x58, + 0x72, 0x45, 0x4f, 0xf3, 0x15, 0x9d, 0x5c, 0x1d, 0x2c, 0x0e, 0x0e, 0x5a, 0x87, 0xf9, 0xb7, 0x3c, + 0x16, 0xff, 0xba, 0x3f, 0xd5, 0x3d, 0x90, 0xf1, 0x05, 0x1e, 0x88, 0xeb, 0x58, 0x76, 0xc9, 0xed, + 0xc6, 0x6d, 0xb6, 0x75, 0xdd, 0x75, 0x77, 0xb3, 0x59, 0xd8, 0xb5, 0x04, 0x86, 0xf2, 0xea, 0x52, + 0xc7, 0xc9, 0x01, 0x48, 0x09, 0xa3, 0xfb, 0x64, 0x42, 0xc9, 0x09, 0xa5, 0x2b, 0x03, 0xbb, 0x3c, + 0x74, 0x6e, 0x72, 0x3d, 0x82, 0x50, 0xc2, 0x67, 0x44, 0xa7, 0x38, 0x41, 0x87, 0xa4, 0x24, 0xfa, + 0x9e, 0x41, 0xa6, 0x65, 0x6e, 0x51, 0xdf, 0xb1, 0x9c, 0x96, 0x3a, 0x0d, 0x34, 0xb0, 0x4b, 0x19, + 0xd2, 0x17, 0x85, 0xa2, 0x84, 0xcb, 0x5a, 0x60, 0x25, 0x85, 0x0d, 0x7d, 0xd2, 0xcc, 0x5f, 0xe4, + 0x09, 0xed, 0xef, 0x4e, 0xd1, 0x57, 0x13, 0xce, 0xe2, 0x4c, 0xca, 0x59, 0x4c, 0xeb, 0x33, 0x34, + 0x5f, 0xd1, 0x22, 0x45, 0xb5, 0xea, 0x6c, 0xf5, 0x12, 0xaa, 0x05, 0x71, 0x07, 0xe9, 0x0f, 0xe1, + 0x45, 0xae, 0x83, 0xa7, 0x88, 0xa7, 0x75, 0x34, 0x49, 0x83, 0xcc, 0x24, 0x14, 0x40, 0x7d, 0x52, + 0xd6, 0xb4, 0x86, 0xc7, 0x73, 0x39, 0xf3, 0xf1, 0x84, 0x32, 0x65, 0xf5, 0xa2, 0xd1, 0x41, 0x97, + 0x62, 0xfe, 0xba, 0x48, 0xb4, 0xfc, 0x87, 0x5e, 0xe2, 0x5e, 0x90, 0x79, 0x7b, 0x76, 0x83, 0x2d, + 0x35, 0x1a, 0x6e, 0xcf, 0x09, 0xf0, 0x60, 0xa2, 0x57, 0x08, 0x1b, 0x89, 0x51, 0x48, 0x71, 0xcb, + 0xf6, 0xb9, 0x74, 0x6c, 0x78, 0x30, 0x99, 0xda, 0xe7, 0xa9, 0xe4, 0x18, 0xab, 0x5b, 0x04, 0x4e, + 0x54, 0xcb, 0xf9, 0x63, 0xac, 0x96, 0x6d, 0x52, 0xf2, 0x93, 0xbe, 0xf8, 0x8d, 0x4c, 0xef, 0x02, + 0xd0, 0xe7, 0x45, 0xbd, 0xb1, 0xc8, 0xd1, 0x45, 0xf0, 0x42, 0x6b, 0x2a, 0x68, 0xa3, 0xaf, 0xcd, + 0xa2, 0x35, 0x95, 0x11, 0xc4, 0x5a, 0x53, 0xcf, 0x80, 0xc0, 0xbc, 0x46, 0x1b, 0xf3, 
0x98, 0xd2, + 0xa0, 0x8f, 0xa9, 0xd0, 0x17, 0xd4, 0x06, 0x80, 0xec, 0xa2, 0xff, 0x61, 0x7b, 0xac, 0xc3, 0x9c, + 0xc0, 0x8f, 0xb3, 0xc8, 0x70, 0xd4, 0x87, 0x18, 0x97, 0xf6, 0x08, 0xe9, 0x46, 0x2d, 0x1b, 0xac, + 0x40, 0x96, 0x32, 0xec, 0x25, 0xd9, 0xf7, 0x89, 0x13, 0xf5, 0x98, 0x0e, 0x9a, 0x20, 0xfa, 0x2d, + 0x72, 0x2a, 0xce, 0xc7, 0x96, 0x99, 0xb5, 0x2d, 0xc3, 0x06, 0xf6, 0x19, 0x55, 0xe3, 0xed, 0x59, + 0x3e, 0xfd, 0x54, 0xfd, 0x20, 0x26, 0x38, 0x78, 0xbe, 0xf9, 0xd7, 0x02, 0x99, 0x1d, 0x10, 0x55, + 0xe9, 0x2d, 0xec, 0x6d, 0x64, 0xea, 0xa8, 0x45, 0xef, 0x52, 0xb4, 0xfe, 0x86, 0xec, 0xac, 0xb5, + 0xdb, 0x8f, 0xaa, 0xb3, 0x16, 0x22, 0x81, 0x86, 0x1a, 0xf6, 0x2a, 0xf2, 0x47, 0xea, 0x55, 0xac, + 0x12, 0xca, 0xee, 0x71, 0xf5, 0x33, 0xcc, 0xa8, 0xc4, 0x5f, 0x55, 0x68, 0x97, 0x6a, 0xf3, 0xc8, + 0x4d, 0xaf, 0xf4, 0x71, 0xc0, 0x80, 0x59, 0xa2, 0x50, 0x69, 0xba, 0xdc, 0x76, 0xc4, 0x7a, 0xa5, + 0xf1, 0x6b, 0x85, 0xca, 0xd5, 0x70, 0x00, 0x62, 0x1e, 0x6e, 0xc7, 0x51, 0xf1, 0x59, 0x94, 0xbb, + 0xb8, 0x90, 0xa5, 0xf8, 0x94, 0x66, 0x75, 0x60, 0xd5, 0x49, 0x97, 0xc8, 0x94, 0x9c, 0xb4, 0xb4, + 0xbe, 0x12, 0x76, 0x82, 0xd4, 0x7b, 0xd9, 0x93, 0x38, 0x45, 0x35, 0x42, 0xe2, 0x61, 0x48, 0xf3, + 0x9b, 0x7f, 0xc8, 0x93, 0xd9, 0x01, 0xa9, 0x68, 0xd4, 0x12, 0x33, 0x1e, 0x45, 0x4b, 0xec, 0x71, + 0x98, 0x0c, 0xcf, 0xaf, 0x1c, 0xb7, 0x6e, 0x35, 0x76, 0x18, 0x36, 0xf9, 0x23, 0xb5, 0xdd, 0x54, + 0x64, 0x08, 0xc7, 0x43, 0xeb, 0x2a, 0x1c, 0xc9, 0xba, 0x86, 0xb6, 0x88, 0x4b, 0x61, 0xdd, 0x20, + 0xda, 0x3e, 0xeb, 0x56, 0xb0, 0x83, 0x0d, 0x93, 0x28, 0x64, 0x2d, 0x27, 0x46, 0x21, 0xc5, 0x6d, + 0xfe, 0xce, 0x20, 0xb3, 0x03, 0x52, 0xba, 0x44, 0x9c, 0x31, 0x8e, 0x31, 0xce, 0x88, 0x86, 0x74, + 0x7c, 0x80, 0x7a, 0x43, 0x5a, 0x1d, 0x06, 0x8e, 0x9a, 0x9f, 0xf4, 0xad, 0xf3, 0xca, 0x1e, 0xf7, + 0xc9, 0xd9, 0x5a, 0x76, 0xeb, 0xaa, 0x3b, 0xa6, 0x4c, 0xe6, 0xfc, 0xd0, 0x19, 0xe8, 0x8a, 0xd3, + 0x74, 0x53, 0x6d, 0xb1, 0x47, 0xe1, 0x5a, 0xcc, 0x0f, 0x0d, 0x32, 0x99, 0x6c, 0xbe, 0xd1, 0x67, + 0x49, 0xbe, 0xe7, 0xd9, 0xb8, 0xbb, 0x68, 0xc6, 0x5b, 0xb0, 0x02, 0x82, 0x2e, 0x86, 0x3d, 0xd6, + 0x44, 0xd5, 0x45, 0xc3, 0xdc, 0xb4, 0x41, 0xd0, 0xc5, 0x1b, 0x9b, 0x9d, 0x20, 0xe8, 0xae, 0x7b, + 0xee, 0xbd, 0x7d, 0x6c, 0x6d, 0xc8, 0x37, 0x36, 0xd7, 0x37, 0x37, 0xd7, 0x25, 0x11, 0xe2, 0x71, + 0xd1, 0x62, 0x14, 0x0f, 0xbe, 0xe2, 0x2e, 0xc4, 0x2d, 0x46, 0xc1, 0xbd, 0xa1, 0xd8, 0x35, 0x0e, + 0xf3, 0xb7, 0x06, 0xa1, 0xfd, 0xe9, 0xf8, 0xff, 0x9c, 0xe1, 0xfc, 0x2c, 0x47, 0x46, 0xf1, 0xcc, + 0xe8, 0xf7, 0x79, 0x09, 0x94, 0xd0, 0x6f, 0xb6, 0x15, 0xa6, 0x1a, 0xa4, 0xd1, 0x55, 0x4b, 0xd2, + 0x21, 0x25, 0x8b, 0xbe, 0x6f, 0x90, 0x19, 0x4e, 0x4a, 0xee, 0x2f, 0x5b, 0xd3, 0xf8, 0x5a, 0x1a, + 0xa6, 0x76, 0x0a, 0x17, 0x31, 0xd3, 0x37, 0x04, 0xfd, 0x42, 0xcd, 0xbf, 0xe7, 0x48, 0x3f, 0xa3, + 0x50, 0x69, 0x43, 0x25, 0x2f, 0xc6, 0xc0, 0x8f, 0x91, 0x70, 0x54, 0xd4, 0x1f, 0x96, 0xfc, 0x9a, + 0x27, 0xdb, 0xe2, 0x95, 0x54, 0xd1, 0xc8, 0xf5, 0xdc, 0xf6, 0x5b, 0x3c, 0x85, 0xd6, 0xbe, 0xa6, + 0x91, 0xb0, 0x80, 0xf0, 0xb4, 0x2b, 0x5e, 0xde, 0xe2, 0xc7, 0x39, 0xd9, 0xbe, 0xb1, 0xe8, 0x97, + 0xa5, 0xbd, 0xfd, 0x45, 0x64, 0x88, 0x85, 0x0c, 0xd1, 0xd1, 0x33, 0x7f, 0xce, 0xab, 0xca, 0x74, + 0x3d, 0x28, 0xe6, 0xcb, 0xfa, 0x62, 0x65, 0x39, 0x5d, 0x8f, 0xaf, 0x28, 0x32, 0x84, 0xe3, 0x74, + 0x93, 0x8c, 0x8a, 0x30, 0x06, 0x78, 0x7f, 0x87, 0x0e, 0x87, 0xf2, 0xf5, 0xdf, 0x55, 0x85, 0x00, + 0x21, 0x94, 0xf9, 0x17, 0x7e, 0x2b, 0xfb, 0xcb, 0x20, 0xee, 0xf1, 0x4e, 0x88, 0xb7, 0x36, 0x51, + 0xd7, 0x75, 0x25, 0xb1, 0xc8, 0x67, 0x70, 0x91, 0x27, 0xd6, 0x06, 0xf0, 0xc0, 0xc0, 0x99, 0x51, + 0x28, 0xcf, 
0x3d, 0x82, 0x50, 0x6e, 0xfe, 0x3e, 0x47, 0xca, 0xda, 0xab, 0x81, 0xe3, 0x48, 0x2f, + 0x47, 0xba, 0x3c, 0xdc, 0x85, 0x1f, 0x20, 0x5c, 0xcc, 0xfc, 0xd6, 0x42, 0x04, 0xcd, 0xf8, 0x53, + 0x04, 0xf1, 0xe4, 0x83, 0x82, 0x4e, 0xe5, 0x23, 0xf9, 0xe3, 0xc8, 0x47, 0xcc, 0x1f, 0x1b, 0x64, + 0x2a, 0xb5, 0x1a, 0xf1, 0xbe, 0xc4, 0x8f, 0x9e, 0xf0, 0x44, 0xa3, 0xa2, 0x21, 0xe6, 0x03, 0x8d, + 0x4b, 0xa6, 0x0d, 0xcc, 0x0f, 0x6c, 0x47, 0x76, 0x5f, 0xc5, 0x7b, 0x96, 0x5c, 0x2a, 0x6d, 0x48, + 0x8c, 0x42, 0x8a, 0xdb, 0xfc, 0xa5, 0x41, 0x9e, 0x39, 0xac, 0xc9, 0x25, 0x92, 0x48, 0xec, 0x64, + 0x45, 0x89, 0x89, 0x91, 0x4c, 0x22, 0x57, 0x93, 0xc3, 0x90, 0xe6, 0x17, 0x1f, 0xb6, 0x68, 0x24, + 0x5c, 0x60, 0xf4, 0x02, 0x41, 0x9b, 0x0e, 0x3a, 0x9f, 0xf9, 0x47, 0xee, 0x66, 0xfb, 0x5e, 0xb2, + 0xd0, 0x3b, 0x51, 0xb8, 0x30, 0xb2, 0x1f, 0xcc, 0xe0, 0x10, 0x73, 0x64, 0x65, 0xf2, 0xb8, 0x4f, + 0xe2, 0xcc, 0x9c, 0xb6, 0xc9, 0xb8, 0x02, 0x4e, 0xc4, 0xa8, 0x2c, 0x0b, 0x3e, 0x81, 0x0b, 0x18, + 0xdf, 0xd0, 0xf0, 0x20, 0x81, 0x2e, 0x32, 0xce, 0x8e, 0x68, 0x5e, 0xc8, 0x23, 0xca, 0x25, 0x3f, + 0x90, 0xb9, 0x11, 0x0e, 0x40, 0xcc, 0x63, 0xfe, 0x64, 0x84, 0xcc, 0x0e, 0xe8, 0xf3, 0xfe, 0x1f, + 0x97, 0x84, 0xdc, 0xe9, 0xab, 0xcf, 0x26, 0xfc, 0x74, 0xd0, 0x50, 0x5f, 0x55, 0x88, 0xda, 0x4a, + 0xfd, 0x23, 0xde, 0xb0, 0xdb, 0x4e, 0x43, 0xf5, 0x13, 0xac, 0x30, 0xc3, 0x57, 0x3d, 0xaa, 0x98, + 0x0c, 0x3a, 0x4f, 0xb2, 0x24, 0x28, 0x3e, 0x54, 0x91, 0x38, 0x8e, 0x5f, 0x52, 0xab, 0x8f, 0x1c, + 0x46, 0xb3, 0x1c, 0x88, 0x7c, 0x0d, 0x0a, 0x1a, 0x0c, 0x24, 0x40, 0xe9, 0x8f, 0x78, 0xf4, 0x43, + 0xc2, 0x92, 0x17, 0xd8, 0x4d, 0xab, 0x11, 0xbd, 0x10, 0x3d, 0xa2, 0x73, 0x9d, 0xc3, 0xcd, 0x4d, + 0x43, 0x0a, 0x1e, 0xfa, 0x04, 0x9a, 0x77, 0xf8, 0x55, 0x4f, 0x47, 0x78, 0x7a, 0x86, 0x14, 0x1c, + 0xf1, 0x31, 0xb1, 0xf2, 0x37, 0x91, 0x65, 0xc9, 0x6f, 0x88, 0xe5, 0x08, 0x7d, 0x81, 0x8c, 0x30, + 0xf9, 0x85, 0xb0, 0xb2, 0xf7, 0xc8, 0x9d, 0xab, 0x0f, 0x83, 0xd5, 0x98, 0xf9, 0x01, 0xcf, 0xc6, + 0x53, 0x09, 0x52, 0xa6, 0x62, 0xe3, 0x8e, 0x5e, 0x6c, 0x1c, 0x39, 0xcf, 0x4b, 0x94, 0x1d, 0x66, + 0x93, 0x4c, 0x26, 0x7b, 0xac, 0x5a, 0x5a, 0x6c, 0x1c, 0x96, 0x16, 0x8b, 0xcf, 0xd7, 0x2c, 0xf1, + 0x1d, 0x1b, 0x37, 0x62, 0x7c, 0x45, 0x1a, 0xb5, 0xe8, 0x96, 0x90, 0x0e, 0x11, 0x47, 0xed, 0xc5, + 0xfb, 0x9f, 0x9c, 0x7e, 0xe2, 0x01, 0xff, 0xfd, 0x93, 0xff, 0xde, 0xfb, 0xf4, 0xb4, 0x71, 0x9f, + 0xff, 0x1e, 0xf0, 0xdf, 0xbf, 0xf8, 0xef, 0xa7, 0x9f, 0x9d, 0x7e, 0xe2, 0x4e, 0x6e, 0x6f, 0xe1, + 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xf1, 0x46, 0xe8, 0xd7, 0x30, 0x00, 0x00, +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto new file mode 100644 index 00000000..0c06f124 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto @@ -0,0 +1,776 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package github.com.openshift.origin.pkg.build.api.v1; + +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// BinaryBuildRequestOptions are the options required to fully speficy a binary build request +message BinaryBuildRequestOptions { + // metadata for BinaryBuildRequestOptions. 
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // asFile determines if the binary should be created as a file within the source rather than extracted as an archive + optional string asFile = 2; + + // revision.commit is the value identifying a specific commit + optional string revisionCommit = 3; + + // revision.message is the description of a specific commit + optional string revisionMessage = 4; + + // revision.authorName of the source control user + optional string revisionAuthorName = 5; + + // revision.authorEmail of the source control user + optional string revisionAuthorEmail = 6; + + // revision.committerName of the source control user + optional string revisionCommitterName = 7; + + // revision.committerEmail of the source control user + optional string revisionCommitterEmail = 8; +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +message BinaryBuildSource { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + optional string asFile = 1; +} + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +message Build { + // Standard object's metadata. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // spec is all the inputs used to execute the build. + optional BuildSpec spec = 2; + + // status is the current status of the build. + optional BuildStatus status = 3; +} + +// BuildConfig is a template which can be used to create new builds. +message BuildConfig { + // metadata for BuildConfig. + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // spec holds all the input necessary to produce a new build, and the conditions when + // to trigger them. + optional BuildConfigSpec spec = 2; + + // status holds any relevant information about a build config + optional BuildConfigStatus status = 3; +} + +// BuildConfigList is a collection of BuildConfigs. +message BuildConfigList { + // metadata for BuildConfigList. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // items is a list of build configs + repeated BuildConfig items = 2; +} + +// BuildConfigSpec describes when and how builds are created +message BuildConfigSpec { + // triggers determine how new Builds can be launched from a BuildConfig. If + // no triggers are defined, a new build can only occur as a result of an + // explicit client build creation. + repeated BuildTriggerPolicy triggers = 1; + + // RunPolicy describes how the new build created from this build + // configuration will be scheduled for execution. + // This is optional, if not specified we default to "Serial". + optional string runPolicy = 2; + + // CommonSpec is the desired build specification + optional CommonSpec commonSpec = 3; +} + +// BuildConfigStatus contains current state of the build config object. +message BuildConfigStatus { + // lastVersion is used to inform about number of last triggered build. 
+ optional int64 lastVersion = 1; +} + +// BuildList is a collection of Builds. +message BuildList { + // metadata for BuildList. + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // items is a list of builds + repeated Build items = 2; +} + +// BuildLog is the (unused) resource associated with the build log redirector +message BuildLog { +} + +// BuildLogOptions is the REST options for a build log +message BuildLogOptions { + // cointainer for which to stream logs. Defaults to only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // sinceTime is an RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; + + // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // tailLines, If set, is the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // limitBytes, If set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // noWait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the build for which to view logs. + optional int64 version = 10; +} + +// BuildOutput is input to a build strategy and describes the Docker image that the strategy +// should produce. +message BuildOutput { + // to defines an optional location to push the output of this build to. + // Kind must be one of 'ImageStreamTag' or 'DockerImage'. + // This value will be used to look up a Docker image repository to push to. + // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of + // the build unless Namespace is specified. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference to = 1; + + // PushSecret is the name of a Secret that would be used for setting + // up the authentication for executing the Docker push to authentication + // enabled Docker Registry (or Docker Hub). + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pushSecret = 2; +} + +// A BuildPostCommitSpec holds a build post commit hook specification. 
The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails. It will fail +// if the script or command return a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1. Shell script: +// +// "postCommit": { +// "script": "rake test --verbose", +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "commit": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc, are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +message BuildPostCommitSpec { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + repeated string command = 1; + + // args is a list of arguments that are provided to either Command, + // Script or the Docker image's default entrypoint. The arguments are + // placed immediately after the command to be run. + repeated string args = 2; + + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. + optional string script = 3; +} + +// BuildRequest is the resource used to pass parameters to build generator +message BuildRequest { + // metadata for BuildRequest. 
+ optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // revision is the information from the source for a specific repo snapshot. + optional SourceRevision revision = 2; + + // triggeredByImage is the Image that triggered this build. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference triggeredByImage = 3; + + // from is the reference to the ImageStreamTag that triggered the build. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 4; + + // binary indicates a request to build from a binary provided to the builder + optional BinaryBuildSource binary = 5; + + // lastVersion (optional) is the LastVersion of the BuildConfig that was used + // to generate the build. If the BuildConfig in the generator doesn't match, a build will + // not be generated. + optional int64 lastVersion = 6; + + // env contains additional environment variables you want to pass into a builder container + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 7; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 8; +} + +// BuildSource is the SCM used for the build. +message BuildSource { + // type of build input to accept + // +k8s:conversion-gen=false + optional string type = 1; + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and Docker builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + optional BinaryBuildSource binary = 2; + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + optional string dockerfile = 3; + + // git contains optional information about git build source + optional GitBuildSource git = 4; + + // images describes a set of images to be used to provide source for the build + repeated ImageSource images = 5; + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows to have buildable sources in directory other than root of + // repository. + optional string contextDir = 6; + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning private repository. + // The secret contains valid credentials for remote repository, where the + // data's key represent the authentication method to be used and value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference sourceSecret = 7; + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. 
+ repeated SecretBuildSource secrets = 8; +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +message BuildSpec { + // CommonSpec is the information that represents a build + optional CommonSpec commonSpec = 1; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 2; +} + +// BuildStatus contains the status of a build +message BuildStatus { + // phase is the point in the build lifecycle. + optional string phase = 1; + + // cancelled describes if a cancel event was triggered for the build. + optional bool cancelled = 2; + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + optional string reason = 3; + + // message is a human-readable message indicating details about why the build has this status. + optional string message = 4; + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time startTimestamp = 5; + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTimestamp = 6; + + // duration contains time.Duration object describing build time. + optional int64 duration = 7; + + // outputDockerImageReference contains a reference to the Docker image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + optional string outputDockerImageReference = 8; + + // config is an ObjectReference to the BuildConfig this Build is based on. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference config = 9; +} + +// BuildStrategy contains the details of how to perform a build. +message BuildStrategy { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + optional string type = 1; + + // dockerStrategy holds the parameters to the Docker build strategy. + optional DockerBuildStrategy dockerStrategy = 2; + + // sourceStrategy holds the parameters to the Source build strategy. + optional SourceBuildStrategy sourceStrategy = 3; + + // customStrategy holds the parameters to the Custom build strategy + optional CustomBuildStrategy customStrategy = 4; + + // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // This strategy is in tech preview. + optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +message BuildTriggerCause { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change",etc. + optional string message = 1; + + // genericWebHook holds data about a builds generic webhook trigger. 
+ optional GenericWebHookCause genericWebHook = 2; + + // gitHubWebHook represents data for a GitHub webhook that fired a + // specific build. + optional GitHubWebHookCause githubWebHook = 3; + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + optional ImageChangeCause imageChangeBuild = 4; +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +message BuildTriggerPolicy { + // type is the type of build trigger + optional string type = 1; + + // github contains the parameters for a GitHub webhook type of trigger + optional WebHookTrigger github = 2; + + // generic contains the parameters for a Generic webhook type of trigger + optional WebHookTrigger generic = 3; + + // imageChange contains parameters for an ImageChange type of trigger + optional ImageChangeTrigger imageChange = 4; +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +message CommonSpec { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + optional string serviceAccount = 1; + + // source describes the SCM in use. + optional BuildSource source = 2; + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + optional SourceRevision revision = 3; + + // strategy defines how to perform a build. + optional BuildStrategy strategy = 4; + + // output describes the Docker image the Strategy should produce. + optional BuildOutput output = 5; + + // resources computes resource requirements to execute the build. + optional k8s.io.kubernetes.pkg.api.v1.ResourceRequirements resources = 6; + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. + optional BuildPostCommitSpec postCommit = 7; + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; value must be positive integer + optional int64 completionDeadlineSeconds = 8; +} + +// CustomBuildStrategy defines input parameters specific to Custom build. +message CustomBuildStrategy { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; + + // exposeDockerSocket will allow running Docker commands (and build Docker images) from + // inside the Docker container. 
+ // TODO: Allow admins to enforce 'false' for this option + optional bool exposeDockerSocket = 4; + + // forcePull describes if the controller should configure the build pod to always pull the images + // for the builder or only pull if it is not present locally + optional bool forcePull = 5; + + // secrets is a list of additional secrets that will be included in the build pod + repeated SecretSpec secrets = 6; + + // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder + optional string buildAPIVersion = 7; +} + +// DockerBuildStrategy defines input parameters specific to Docker build. +message DockerBuildStrategy { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + // the resulting image will be used in the FROM line of the Dockerfile for this build. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; + + // noCache if set to true indicates that the docker build must be executed with the + // --no-cache=true flag + optional bool noCache = 3; + + // env contains additional environment variables you want to pass into a builder container + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 4; + + // forcePull describes if the builder should pull the images from registry prior to building. + optional bool forcePull = 5; + + // dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, + // relative to the root of the context (contextDir). + optional string dockerfilePath = 6; +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +message GenericWebHookCause { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GenericWebHookEvent is the payload expected for a generic webhook post +message GenericWebHookEvent { + // type is the type of source repository + // +k8s:conversion-gen=false + optional string type = 1; + + // git is the git information if the Type is BuildSourceGit + optional GitInfo git = 2; + + // env contains additional environment variables you want to pass into a builder container + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; +} + +// GitBuildSource defines the parameters of a Git SCM +message GitBuildSource { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + optional string uri = 1; + + // ref is the branch/tag/ref to build. + optional string ref = 2; + + // httpProxy is a proxy used to reach the git repository over http + optional string httpProxy = 3; + + // httpsProxy is a proxy used to reach the git repository over https + optional string httpsProxy = 4; +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +message GitHubWebHookCause { + // revision is the git revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. 
+ optional string secret = 2; +} + +// GitInfo is the aggregated git information for a generic webhook post +message GitInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; +} + +// GitSourceRevision is the commit information from a git source for a build +message GitSourceRevision { + // commit is the commit hash identifying a specific commit + optional string commit = 1; + + // author is the author of a specific commit + optional SourceControlUser author = 2; + + // committer is the committer of a specific commit + optional SourceControlUser committer = 3; + + // message is the description of a specific commit + optional string message = 4; +} + +// ImageChangeCause contains information about the image that triggered a +// build +message ImageChangeCause { + // imageID is the ID of the image that triggered a a new build. + optional string imageID = 1; + + // fromRef contains detailed information about an image that triggered a + // build. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference fromRef = 2; +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +message ImageChangeTrigger { + // lastTriggeredImageID is used internally by the ImageChangeController to save last + // used image ID for build + optional string lastTriggeredImageID = 1; + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 2; +} + +// ImageSource describes an image that is used as source for the build +message ImageSource { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; + + // paths is a list of source and destination paths to copy from the image. + repeated ImageSourcePath paths = 2; + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 3; +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +message ImageSourcePath { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. + optional string sourcePath = 1; + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + optional string destinationDir = 2; +} + +// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. +// This strategy is in tech preview. +message JenkinsPipelineBuildStrategy { + // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are + // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. + optional string jenkinsfilePath = 1; + + // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. 
+ optional string jenkinsfile = 2; +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +message SecretBuildSource { + // secret is a reference to an existing secret that you want to use in your + // build. + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secret = 1; + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the Docker build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during docker build. + optional string destinationDir = 2; +} + +// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point +message SecretSpec { + // secretSource is a reference to the secret + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretSource = 1; + + // mountPath is the path at which to mount the secret + optional string mountPath = 2; +} + +// SourceBuildStrategy defines input parameters specific to an Source build. +message SourceBuildStrategy { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; + + // scripts is the location of Source scripts + optional string scripts = 4; + + // incremental flag forces the Source build to do incremental builds if true. + optional bool incremental = 5; + + // forcePull describes if the builder should pull the images from registry prior to building. + optional bool forcePull = 6; + + // runtimeImage is an optional image that is used to run an application + // without unneeded dependencies installed. The building of the application + // is still done in the builder image but, post build, you can copy the + // needed artifacts in the runtime image for use. + // This field and the feature it enables are in tech preview. + optional k8s.io.kubernetes.pkg.api.v1.ObjectReference runtimeImage = 7; + + // runtimeArtifacts specifies a list of source/destination pairs that will be + // copied from the builder to the runtime image. sourcePath can be a file or + // directory. destinationDir must be a directory. destinationDir can also be + // empty or equal to ".", in this case it just refers to the root of WORKDIR. + // This field and the feature it enables are in tech preview. 
+ repeated ImageSourcePath runtimeArtifacts = 8; +} + +// SourceControlUser defines the identity of a user of source control +message SourceControlUser { + // name of the source control user + optional string name = 1; + + // email of the source control user + optional string email = 2; +} + +// SourceRevision is the revision or commit information from the source for the build +message SourceRevision { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + optional string type = 1; + + // Git contains information about git-based build source + optional GitSourceRevision git = 2; +} + +// WebHookTrigger is a trigger that gets invoked using a webhook type of post +message WebHookTrigger { + // secret used to validate requests. + optional string secret = 1; + + // allowEnv determines whether the webhook can set environment variables; can only + // be set to true for GenericWebHook. + optional bool allowEnv = 2; +} + diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/register.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/register.go new file mode 100644 index 00000000..c79f57f0 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/runtime" +) + +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Build{}, + &BuildList{}, + &BuildConfig{}, + &BuildConfigList{}, + &BuildLog{}, + &BuildRequest{}, + &BuildLogOptions{}, + &BinaryBuildRequestOptions{}, + ) + return nil +} + +func (obj *Build) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildConfigList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildLog) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BuildLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *BinaryBuildRequestOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go new file mode 100644 index 00000000..bab94f8a --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go @@ -0,0 +1,459 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. 
+// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_BinaryBuildRequestOptions = map[string]string{ + "": "BinaryBuildRequestOptions are the options required to fully speficy a binary build request", + "metadata": "metadata for BinaryBuildRequestOptions.", + "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive", + "revision.commit": "revision.commit is the value identifying a specific commit", + "revision.message": "revision.message is the description of a specific commit", + "revision.authorName": "revision.authorName of the source control user", + "revision.authorEmail": "revision.authorEmail of the source control user", + "revision.committerName": "revision.committerName of the source control user", + "revision.committerEmail": "revision.committerEmail of the source control user", +} + +func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string { + return map_BinaryBuildRequestOptions +} + +var map_BinaryBuildSource = map[string]string{ + "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.", + "asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.", +} + +func (BinaryBuildSource) SwaggerDoc() map[string]string { + return map_BinaryBuildSource +} + +var map_Build = map[string]string{ + "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.", + "metadata": "Standard object's metadata.", + "spec": "spec is all the inputs used to execute the build.", + "status": "status is the current status of the build.", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildConfig = map[string]string{ + "": "BuildConfig is a template which can be used to create new builds.", + "metadata": "metadata for BuildConfig.", + "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", + "status": "status holds any relevant information about a build config", +} + +func (BuildConfig) SwaggerDoc() map[string]string { + return map_BuildConfig +} + +var map_BuildConfigList = map[string]string{ + "": "BuildConfigList is a collection of BuildConfigs.", + "metadata": "metadata for BuildConfigList.", + "items": "items is a list of build configs", +} + +func (BuildConfigList) SwaggerDoc() map[string]string { + return map_BuildConfigList +} + +var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. 
This is optional, if not specified we default to \"Serial\".", +} + +func (BuildConfigSpec) SwaggerDoc() map[string]string { + return map_BuildConfigSpec +} + +var map_BuildConfigStatus = map[string]string{ + "": "BuildConfigStatus contains current state of the build config object.", + "lastVersion": "lastVersion is used to inform about number of last triggered build.", +} + +func (BuildConfigStatus) SwaggerDoc() map[string]string { + return map_BuildConfigStatus +} + +var map_BuildList = map[string]string{ + "": "BuildList is a collection of Builds.", + "metadata": "metadata for BuildList.", + "items": "items is a list of builds", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildLog = map[string]string{ + "": "BuildLog is the (unused) resource associated with the build log redirector", +} + +func (BuildLog) SwaggerDoc() map[string]string { + return map_BuildLog +} + +var map_BuildLogOptions = map[string]string{ + "": "BuildLogOptions is the REST options for a build log", + "container": "cointainer for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "previous returns previous build logs. Defaults to false.", + "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "version": "version of the build for which to view logs.", +} + +func (BuildLogOptions) SwaggerDoc() map[string]string { + return map_BuildLogOptions +} + +var map_BuildOutput = map[string]string{ + "": "BuildOutput is input to a build strategy and describes the Docker image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a Docker image repository to push to. 
In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", +} + +func (BuildOutput) SwaggerDoc() map[string]string { + return map_BuildOutput +} + +var map_BuildPostCommitSpec = map[string]string{ + "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command return a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n \"postCommit\": {\n \"script\": \"rake test --verbose\",\n }\n\n The above is a convenient form which is equivalent to:\n\n \"postCommit\": {\n \"command\": [\"/bin/sh\", \"-ic\"],\n \"args\": [\"rake test --verbose\"]\n }\n\n2. A command as the image entrypoint:\n\n \"postCommit\": {\n \"commit\": [\"rake\", \"test\", \"--verbose\"]\n }\n\n Command overrides the image entrypoint in the exec form, as documented in\n Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n \"postCommit\": {\n\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n \"postCommit\": {\n \"script\": \"rake test $1\",\n \"args\": [\"--verbose\"]\n }\n\n This form is useful if you need to pass arguments that would otherwise be\n hard to quote properly in the shell script. In the script, $0 will be\n \"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n \"postCommit\": {\n \"command\": [\"rake\", \"test\"],\n \"args\": [\"--verbose\"]\n }\n\n This form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.", + "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.", + "args": "args is a list of arguments that are provided to either Command, Script or the Docker image's default entrypoint. The arguments are placed immediately after the command to be run.", + "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. 
E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", +} + +func (BuildPostCommitSpec) SwaggerDoc() map[string]string { + return map_BuildPostCommitSpec +} + +var map_BuildRequest = map[string]string{ + "": "BuildRequest is the resource used to pass parameters to build generator", + "metadata": "metadata for BuildRequest.", + "revision": "revision is the information from the source for a specific repo snapshot.", + "triggeredByImage": "triggeredByImage is the Image that triggered this build.", + "from": "from is the reference to the ImageStreamTag that triggered the build.", + "binary": "binary indicates a request to build from a binary provided to the builder", + "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", + "env": "env contains additional environment variables you want to pass into a builder container", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildRequest) SwaggerDoc() map[string]string { + return map_BuildRequest +} + +var map_BuildSource = map[string]string{ + "": "BuildSource is the SCM used for the build.", + "type": "type of build input to accept", + "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and Docker builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.", + "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", + "git": "git contains optional information about git build source", + "images": "images describes a set of images to be used to provide source for the build", + "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows to have buildable sources in directory other than root of repository.", + "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning private repository. The secret contains valid credentials for remote repository, where the data's key represent the authentication method to be used and value is the base64 encoded credentials. 
Supported auth methods are: ssh-privatekey.", + "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", +} + +func (BuildSource) SwaggerDoc() map[string]string { + return map_BuildSource +} + +var map_BuildSpec = map[string]string{ + "": "BuildSpec has the information to represent a build and also additional information about a build", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_BuildStatus = map[string]string{ + "": "BuildStatus contains the status of a build", + "phase": "phase is the point in the build lifecycle.", + "cancelled": "cancelled describes if a cancel event was triggered for the build.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "message": "message is a human-readable message indicating details about why the build has this status.", + "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", + "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", + "duration": "duration contains time.Duration object describing build time.", + "outputDockerImageReference": "outputDockerImageReference contains a reference to the Docker image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", + "config": "config is an ObjectReference to the BuildConfig this Build is based on.", +} + +func (BuildStatus) SwaggerDoc() map[string]string { + return map_BuildStatus +} + +var map_BuildStrategy = map[string]string{ + "": "BuildStrategy contains the details of how to perform a build.", + "type": "type is the kind of build strategy.", + "dockerStrategy": "dockerStrategy holds the parameters to the Docker build strategy.", + "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", + "customStrategy": "customStrategy holds the parameters to the Custom build strategy", + "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. This strategy is in tech preview.", +} + +func (BuildStrategy) SwaggerDoc() map[string]string { + return map_BuildStrategy +} + +var map_BuildTriggerCause = map[string]string{ + "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", + "message": "message is used to store a human readable message for why the build was triggered. 
E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", + "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", + "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", +} + +func (BuildTriggerCause) SwaggerDoc() map[string]string { + return map_BuildTriggerCause +} + +var map_BuildTriggerPolicy = map[string]string{ + "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", + "type": "type is the type of build trigger", + "github": "github contains the parameters for a GitHub webhook type of trigger", + "generic": "generic contains the parameters for a Generic webhook type of trigger", + "imageChange": "imageChange contains parameters for an ImageChange type of trigger", +} + +func (BuildTriggerPolicy) SwaggerDoc() map[string]string { + return map_BuildTriggerPolicy +} + +var map_CommonSpec = map[string]string{ + "": "CommonSpec encapsulates all the inputs necessary to represent a build.", + "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount", + "source": "source describes the SCM in use.", + "revision": "revision is the information from the source for a specific repo snapshot. This is optional.", + "strategy": "strategy defines how to perform a build.", + "output": "output describes the Docker image the Strategy should produce.", + "resources": "resources computes resource requirements to execute the build.", + "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", + "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer", +} + +func (CommonSpec) SwaggerDoc() map[string]string { + return map_CommonSpec +} + +var map_CustomBuildStrategy = map[string]string{ + "": "CustomBuildStrategy defines input parameters specific to Custom build.", + "from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container", + "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build Docker images) from inside the Docker container.", + "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally", + "secrets": "secrets is a list of additional secrets that will be included in the build pod", + "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder", +} + +func (CustomBuildStrategy) SwaggerDoc() map[string]string { + return map_CustomBuildStrategy +} + +var map_DockerBuildStrategy = map[string]string{ + "": "DockerBuildStrategy defines input parameters specific to Docker build.", + "from": "from is 
reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled the resulting image will be used in the FROM line of the Dockerfile for this build.", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries", + "noCache": "noCache if set to true indicates that the docker build must be executed with the --no-cache=true flag", + "env": "env contains additional environment variables you want to pass into a builder container", + "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.", + "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, relative to the root of the context (contextDir).", +} + +func (DockerBuildStrategy) SwaggerDoc() map[string]string { + return map_DockerBuildStrategy +} + +var map_GenericWebHookCause = map[string]string{ + "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.", + "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GenericWebHookCause) SwaggerDoc() map[string]string { + return map_GenericWebHookCause +} + +var map_GenericWebHookEvent = map[string]string{ + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container", +} + +func (GenericWebHookEvent) SwaggerDoc() map[string]string { + return map_GenericWebHookEvent +} + +var map_GitBuildSource = map[string]string{ + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. 
The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", +} + +func (GitBuildSource) SwaggerDoc() map[string]string { + return map_GitBuildSource +} + +var map_GitHubWebHookCause = map[string]string{ + "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.", + "revision": "revision is the git revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GitHubWebHookCause) SwaggerDoc() map[string]string { + return map_GitHubWebHookCause +} + +var map_GitInfo = map[string]string{ + "": "GitInfo is the aggregated git information for a generic webhook post", +} + +func (GitInfo) SwaggerDoc() map[string]string { + return map_GitInfo +} + +var map_GitSourceRevision = map[string]string{ + "": "GitSourceRevision is the commit information from a git source for a build", + "commit": "commit is the commit hash identifying a specific commit", + "author": "author is the author of a specific commit", + "committer": "committer is the committer of a specific commit", + "message": "message is the description of a specific commit", +} + +func (GitSourceRevision) SwaggerDoc() map[string]string { + return map_GitSourceRevision +} + +var map_ImageChangeCause = map[string]string{ + "": "ImageChangeCause contains information about the image that triggered a build", + "imageID": "imageID is the ID of the image that triggered a a new build.", + "fromRef": "fromRef contains detailed information about an image that triggered a build.", +} + +func (ImageChangeCause) SwaggerDoc() map[string]string { + return map_ImageChangeCause +} + +var map_ImageChangeTrigger = map[string]string{ + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build", + "from": "from is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. 
Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", +} + +func (ImageChangeTrigger) SwaggerDoc() map[string]string { + return map_ImageChangeTrigger +} + +var map_ImageSource = map[string]string{ + "": "ImageSource describes an image that is used as source for the build", + "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", + "paths": "paths is a list of source and destination paths to copy from the image.", + "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.", +} + +func (ImageSource) SwaggerDoc() map[string]string { + return map_ImageSource +} + +var map_ImageSourcePath = map[string]string{ + "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.", + "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory.", + "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.", +} + +func (ImageSourcePath) SwaggerDoc() map[string]string { + return map_ImageSourcePath +} + +var map_JenkinsPipelineBuildStrategy = map[string]string{ + "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. This strategy is in tech preview.", + "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", +} + +func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { + return map_JenkinsPipelineBuildStrategy +} + +var map_SecretBuildSource = map[string]string{ + "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", + "secret": "secret is a reference to an existing secret that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. 
For the Docker build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during docker build.", +} + +func (SecretBuildSource) SwaggerDoc() map[string]string { + return map_SecretBuildSource +} + +var map_SecretSpec = map[string]string{ + "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point", + "secretSource": "secretSource is a reference to the secret", + "mountPath": "mountPath is the path at which to mount the secret", +} + +func (SecretSpec) SwaggerDoc() map[string]string { + return map_SecretSpec +} + +var map_SourceBuildStrategy = map[string]string{ + "": "SourceBuildStrategy defines input parameters specific to an Source build.", + "from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container", + "scripts": "scripts is the location of Source scripts", + "incremental": "incremental flag forces the Source build to do incremental builds if true.", + "forcePull": "forcePull describes if the builder should pull the images from registry prior to building.", + "runtimeImage": "runtimeImage is an optional image that is used to run an application without unneeded dependencies installed. The building of the application is still done in the builder image but, post build, you can copy the needed artifacts in the runtime image for use. This field and the feature it enables are in tech preview.", + "runtimeArtifacts": "runtimeArtifacts specifies a list of source/destination pairs that will be copied from the builder to the runtime image. sourcePath can be a file or directory. destinationDir must be a directory. destinationDir can also be empty or equal to \".\", in this case it just refers to the root of WORKDIR. 
This field and the feature it enables are in tech preview.", +} + +func (SourceBuildStrategy) SwaggerDoc() map[string]string { + return map_SourceBuildStrategy +} + +var map_SourceControlUser = map[string]string{ + "": "SourceControlUser defines the identity of a user of source control", + "name": "name of the source control user", + "email": "email of the source control user", +} + +func (SourceControlUser) SwaggerDoc() map[string]string { + return map_SourceControlUser +} + +var map_SourceRevision = map[string]string{ + "": "SourceRevision is the revision or commit information from the source for the build", + "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", + "git": "Git contains information about git-based build source", +} + +func (SourceRevision) SwaggerDoc() map[string]string { + return map_SourceRevision +} + +var map_WebHookTrigger = map[string]string{ + "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post", + "secret": "secret used to validate requests.", + "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.", +} + +func (WebHookTrigger) SwaggerDoc() map[string]string { + return map_WebHookTrigger +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go new file mode 100644 index 00000000..a3e8f56c --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go @@ -0,0 +1,884 @@ +package v1 + +import ( + "time" + + "k8s.io/kubernetes/pkg/api/unversioned" + kapi "k8s.io/kubernetes/pkg/api/v1" +) + +// +genclient=true + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +type Build struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is all the inputs used to execute the build. + Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current status of the build. + Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +type BuildSpec struct { + // CommonSpec is the information that represents a build + CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,2,rep,name=triggeredBy"` +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +type CommonSpec struct { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"` + + // source describes the SCM in use. + Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` + + // revision is the information from the source for a specific repo snapshot. + // This is optional. 
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` + + // strategy defines how to perform a build. + Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"` + + // output describes the Docker image the Strategy should produce. + Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"` + + // resources computes resource requirements to execute the build. + Resources kapi.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"` + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. + PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"` + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; value must be positive integer + CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"` +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +type BuildTriggerCause struct { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change",etc. + Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` + + // genericWebHook holds data about a builds generic webhook trigger. + GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"` + + // gitHubWebHook represents data for a GitHub webhook that fired a + //specific build. + GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"` + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"` +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +type GenericWebHookCause struct { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +type GitHubWebHookCause struct { + // revision is the git revision information of the trigger. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// ImageChangeCause contains information about the image that triggered a +// build +type ImageChangeCause struct { + // imageID is the ID of the image that triggered a a new build. 
+ ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"` + + // fromRef contains detailed information about an image that triggered a + // build. + FromRef *kapi.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"` +} + +// BuildStatus contains the status of a build +type BuildStatus struct { + // phase is the point in the build lifecycle. + Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` + + // cancelled describes if a cancel event was triggered for the build. + Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` + + // message is a human-readable message indicating details about why the build has this status. + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + StartTimestamp *unversioned.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + CompletionTimestamp *unversioned.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` + + // duration contains time.Duration object describing build time. + Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` + + // outputDockerImageReference contains a reference to the Docker image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` + + // config is an ObjectReference to the BuildConfig this Build is based on. + Config *kapi.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` +} + +// BuildPhase represents the status of a build at a point in time. +type BuildPhase string + +// Valid values for BuildPhase. +const ( + // BuildPhaseNew is automatically assigned to a newly created build. + BuildPhaseNew BuildPhase = "New" + + // BuildPhasePending indicates that a pod name has been assigned and a build is + // about to start running. + BuildPhasePending BuildPhase = "Pending" + + // BuildPhaseRunning indicates that a pod has been created and a build is running. + BuildPhaseRunning BuildPhase = "Running" + + // BuildPhaseComplete indicates that a build has been successful. + BuildPhaseComplete BuildPhase = "Complete" + + // BuildPhaseFailed indicates that a build has executed and failed. + BuildPhaseFailed BuildPhase = "Failed" + + // BuildPhaseError indicates that an error prevented the build from executing. + BuildPhaseError BuildPhase = "Error" + + // BuildPhaseCancelled indicates that a running/pending build was stopped from executing. 
+ BuildPhaseCancelled BuildPhase = "Cancelled" +) + +// StatusReason is a brief CamelCase string that describes a temporary or +// permanent build error condition, meant for machine parsing and tidy display +// in the CLI. +type StatusReason string + +// BuildSourceType is the type of SCM used. +type BuildSourceType string + +// Valid values for BuildSourceType. +const ( + //BuildSourceGit instructs a build to use a Git source control repository as the build input. + BuildSourceGit BuildSourceType = "Git" + // BuildSourceDockerfile uses a Dockerfile as the start of a build + BuildSourceDockerfile BuildSourceType = "Dockerfile" + // BuildSourceBinary indicates the build will accept a Binary file as input. + BuildSourceBinary BuildSourceType = "Binary" + // BuildSourceImage indicates the build will accept an image as input + BuildSourceImage BuildSourceType = "Image" + // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies) + BuildSourceNone BuildSourceType = "None" +) + +// BuildSource is the SCM used for the build. +type BuildSource struct { + // type of build input to accept + // +k8s:conversion-gen=false + Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and Docker builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"` + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"` + + // git contains optional information about git build source + Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"` + + // images describes a set of images to be used to provide source for the build + Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"` + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows to have buildable sources in directory other than root of + // repository. + ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"` + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning private repository. + // The secret contains valid credentials for remote repository, where the + // data's key represent the authentication method to be used and value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. 
+ SourceSecret *kapi.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"` + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. + Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"` +} + +// ImageSource describes an image that is used as source for the build +type ImageSource struct { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // paths is a list of source and destination paths to copy from the image. + Paths []ImageSourcePath `json:"paths" protobuf:"bytes,2,rep,name=paths"` + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"` +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +type ImageSourcePath struct { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. + SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"` + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +type SecretBuildSource struct { + // secret is a reference to an existing secret that you want to use in your + // build. + Secret kapi.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"` + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the Docker build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during docker build. + DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +type BinaryBuildSource struct { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. 
+ AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"` +} + +// SourceRevision is the revision or commit information from the source for the build +type SourceRevision struct { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // Git contains information about git-based build source + Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` +} + +// GitSourceRevision is the commit information from a git source for a build +type GitSourceRevision struct { + // commit is the commit hash identifying a specific commit + Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"` + + // author is the author of a specific commit + Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"` + + // committer is the committer of a specific commit + Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"` + + // message is the description of a specific commit + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// GitBuildSource defines the parameters of a Git SCM +type GitBuildSource struct { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"` + + // ref is the branch/tag/ref to build. + Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` + + // httpProxy is a proxy used to reach the git repository over http + HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` + + // httpsProxy is a proxy used to reach the git repository over https + HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` +} + +// SourceControlUser defines the identity of a user of source control +type SourceControlUser struct { + // name of the source control user + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // email of the source control user + Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"` +} + +// BuildStrategy contains the details of how to perform a build. +type BuildStrategy struct { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + Type BuildStrategyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"` + + // dockerStrategy holds the parameters to the Docker build strategy. + DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"` + + // sourceStrategy holds the parameters to the Source build strategy. + SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"` + + // customStrategy holds the parameters to the Custom build strategy + CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"` + + // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // This strategy is in tech preview. + JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"` +} + +// BuildStrategyType describes a particular way of performing a build. +type BuildStrategyType string + +// Valid values for BuildStrategyType. 
+const ( + // DockerBuildStrategyType performs builds using a Dockerfile. + DockerBuildStrategyType BuildStrategyType = "Docker" + + // SourceBuildStrategyType performs builds build using Source To Images with a Git repository + // and a builder image. + SourceBuildStrategyType BuildStrategyType = "Source" + + // CustomBuildStrategyType performs builds using custom builder Docker image. + CustomBuildStrategyType BuildStrategyType = "Custom" + + // JenkinsPipelineBuildStrategyType indicates the build will run via Jenkine Pipeline. + JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline" +) + +// CustomBuildStrategy defines input parameters specific to Custom build. +type CustomBuildStrategy struct { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"` + + // env contains additional environment variables you want to pass into a builder container + Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"` + + // exposeDockerSocket will allow running Docker commands (and build Docker images) from + // inside the Docker container. + // TODO: Allow admins to enforce 'false' for this option + ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"` + + // forcePull describes if the controller should configure the build pod to always pull the images + // for the builder or only pull if it is not present locally + ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"` + + // secrets is a list of additional secrets that will be included in the build pod + Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"` + + // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder + BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"` +} + +// DockerBuildStrategy defines input parameters specific to Docker build. +type DockerBuildStrategy struct { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + // the resulting image will be used in the FROM line of the Dockerfile for this build. + From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"` + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"` + + // noCache if set to true indicates that the docker build must be executed with the + // --no-cache=true flag + NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"` + + // env contains additional environment variables you want to pass into a builder container + Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"` + + // forcePull describes if the builder should pull the images from registry prior to building. 
+ ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"` + + // dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, + // relative to the root of the context (contextDir). + DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"` +} + +// SourceBuildStrategy defines input parameters specific to an Source build. +type SourceBuildStrategy struct { + // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which + // the docker image should be pulled + From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the Docker images from the private Docker + // registries + PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"` + + // env contains additional environment variables you want to pass into a builder container + Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"` + + // scripts is the location of Source scripts + Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"` + + // incremental flag forces the Source build to do incremental builds if true. + Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"` + + // forcePull describes if the builder should pull the images from registry prior to building. + ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"` + + // runtimeImage is an optional image that is used to run an application + // without unneeded dependencies installed. The building of the application + // is still done in the builder image but, post build, you can copy the + // needed artifacts in the runtime image for use. + // This field and the feature it enables are in tech preview. + RuntimeImage *kapi.ObjectReference `json:"runtimeImage,omitempty" protobuf:"bytes,7,opt,name=runtimeImage"` + + // runtimeArtifacts specifies a list of source/destination pairs that will be + // copied from the builder to the runtime image. sourcePath can be a file or + // directory. destinationDir must be a directory. destinationDir can also be + // empty or equal to ".", in this case it just refers to the root of WORKDIR. + // This field and the feature it enables are in tech preview. + RuntimeArtifacts []ImageSourcePath `json:"runtimeArtifacts,omitempty" protobuf:"bytes,8,rep,name=runtimeArtifacts"` +} + +// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. +// This strategy is in tech preview. +type JenkinsPipelineBuildStrategy struct { + // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are + // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. + JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"` + + // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"` +} + +// A BuildPostCommitSpec holds a build post commit hook specification. 
The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails. It will fail +// if the script or command return a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1. Shell script: +// +// "postCommit": { +// "script": "rake test --verbose", +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "commit": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc, are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +type BuildPostCommitSpec struct { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` + // args is a list of arguments that are provided to either Command, + // Script or the Docker image's default entrypoint. The arguments are + // placed immediately after the command to be run. + Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"` + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. 
+	Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the Docker image that the strategy
+// should produce.
+type BuildOutput struct {
+	// to defines an optional location to push the output of this build to.
+	// Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+	// This value will be used to look up a Docker image repository to push to.
+	// In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+	// the build unless Namespace is specified.
+	To *kapi.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+	// PushSecret is the name of a Secret that would be used for setting
+	// up the authentication for executing the Docker push to authentication
+	// enabled Docker Registry (or Docker Hub).
+	PushSecret *kapi.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+}
+
+// BuildConfig is a template which can be used to create new builds.
+type BuildConfig struct {
+	unversioned.TypeMeta `json:",inline"`
+	// metadata for BuildConfig.
+	kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec holds all the input necessary to produce a new build, and the conditions when
+	// to trigger them.
+	Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status holds any relevant information about a build config
+	Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+	// triggers determine how new Builds can be launched from a BuildConfig. If
+	// no triggers are defined, a new build can only occur as a result of an
+	// explicit client build creation.
+	Triggers []BuildTriggerPolicy `json:"triggers" protobuf:"bytes,1,rep,name=triggers"`
+
+	// RunPolicy describes how the new build created from this build
+	// configuration will be scheduled for execution.
+	// This is optional, if not specified we default to "Serial".
+	RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+	// CommonSpec is the desired build specification
+	CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+	// BuildRunPolicyParallel schedules new builds immediately after they are
+	// created. Builds will be executed in parallel.
+	BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+	// BuildRunPolicySerial schedules new builds to execute in a sequence as
+	// they are created. Every build gets queued up and will execute when the
+	// previous build completes. This is the default policy.
+	BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+	// BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+	// cancelling all the previously queued builds.
+	BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+	// lastVersion is used to inform about the number of the last triggered build.
+	LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+type WebHookTrigger struct {
+	// secret used to validate requests.
+	Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+
+	// allowEnv determines whether the webhook can set environment variables; can only
+	// be set to true for GenericWebHook.
+	AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+type ImageChangeTrigger struct {
+	// lastTriggeredImageID is used internally by the ImageChangeController to save last
+	// used image ID for build
+	LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+	// from is a reference to an ImageStreamTag that will trigger a build when updated
+	// It is optional. If no From is specified, the From image from the build strategy
+	// will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+	// a build configuration.
+	From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+type BuildTriggerPolicy struct {
+	// type is the type of build trigger
+	Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
+
+	// github contains the parameters for a GitHub webhook type of trigger
+	GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
+
+	// generic contains the parameters for a Generic webhook type of trigger
+	GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
+
+	// imageChange contains parameters for an ImageChange type of trigger
+	ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
+}
+
+// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
+type BuildTriggerType string
+
+const (
+	// GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+	// GitHub webhook invocations
+	GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
+	GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
+
+	// GenericWebHookBuildTriggerType represents a trigger that launches builds on
+	// generic webhook invocations
+	GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
+	GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
+
+	// ImageChangeBuildTriggerType represents a trigger that launches builds on
+	// availability of a new version of an image
+	ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
+	ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
+
+	// ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+	// WARNING: In the future the behavior will change to trigger a build on any config change
+	ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
+)
+
+// BuildList is a collection of Builds.
+type BuildList struct {
+	unversioned.TypeMeta `json:",inline"`
+	// metadata for BuildList.
+	unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of builds
+	Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// BuildConfigList is a collection of BuildConfigs.
+type BuildConfigList struct {
+	unversioned.TypeMeta `json:",inline"`
+	// metadata for BuildConfigList.
+	unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of build configs
+	Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+type GenericWebHookEvent struct {
+	// type is the type of source repository
+	// +k8s:conversion-gen=false
+	Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+	// git is the git information if the Type is BuildSourceGit
+	Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+
+	// env contains additional environment variables you want to pass into a builder container
+	Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+	GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+	GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// BuildLog is the (unused) resource associated with the build log redirector
+type BuildLog struct {
+	unversioned.TypeMeta `json:",inline"`
+}
+
+// BuildRequest is the resource used to pass parameters to the build generator
+type BuildRequest struct {
+	unversioned.TypeMeta `json:",inline"`
+	// metadata for BuildRequest.
+	kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// revision is the information from the source for a specific repo snapshot.
+	Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+	// triggeredByImage is the Image that triggered this build.
+	TriggeredByImage *kapi.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+	// from is the reference to the ImageStreamTag that triggered the build.
+	From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+	// binary indicates a request to build from a binary provided to the builder
+	Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+	// lastVersion (optional) is the LastVersion of the BuildConfig that was used
+	// to generate the build. If the BuildConfig in the generator doesn't match, a build will
+	// not be generated.
+	LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+	// env contains additional environment variables you want to pass into a builder container
+	Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+	// triggeredBy describes which triggers started the most recent update to the
+	// build configuration and contains information about those triggers.
+	TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,8,rep,name=triggeredBy"`
+}
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+type BinaryBuildRequestOptions struct {
+	unversioned.TypeMeta `json:",inline"`
+	// metadata for BinaryBuildRequestOptions.
+	kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+	AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+	// TODO: Improve map[string][]string conversion so we can handle nested objects
+
+	// revision.commit is the value identifying a specific commit
+	Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+	// revision.message is the description of a specific commit
+	Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+	// revision.authorName of the source control user
+	AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+	// revision.authorEmail of the source control user
+	AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+	// revision.committerName of the source control user
+	CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+	// revision.committerEmail of the source control user
+	CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// BuildLogOptions is the REST options for a build log
+type BuildLogOptions struct {
+	unversioned.TypeMeta `json:",inline"`
+
+	// container for which to stream logs. Defaults to the only container if there is one container in the pod.
+	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+	// follow if true indicates that the build log should be streamed until
+	// the build terminates.
+	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+	// previous returns previous build logs. Defaults to false.
+	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+	// sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+	// sinceTime is an RFC3339 timestamp from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+	// timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+	// of log output. Defaults to false.
+	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+	// tailLines, If set, is the number of lines from the end of the logs to show. If not specified,
+	// logs are shown from the creation of the container or sinceSeconds or sinceTime
+	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+	// limitBytes, If set, is the number of bytes to read from the server before terminating the
+	// log output. This may not display a complete final line of logging, and may return
+	// slightly more or slightly less than the specified limit.
+	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+	// noWait if true causes the call to return immediately even if the build
+	// is not available yet. Otherwise the server will wait until the build has started.
+	// TODO: Fix the tag to 'noWait' in v2
+	NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
+
+	// version of the build for which to view logs.
+	Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+type SecretSpec struct {
+	// secretSource is a reference to the secret
+	SecretSource kapi.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
+
+	// mountPath is the path at which to mount the secret
+	MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+}
diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go
new file mode 100644
index 00000000..3888ae35
--- /dev/null
+++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go
@@ -0,0 +1,1960 @@
+// +build !ignore_autogenerated_openshift
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1
+
+import (
+	api "github.com/openshift/origin/pkg/build/api"
+	pkg_api "k8s.io/kubernetes/pkg/api"
+	api_v1 "k8s.io/kubernetes/pkg/api/v1"
+	conversion "k8s.io/kubernetes/pkg/conversion"
+	runtime "k8s.io/kubernetes/pkg/runtime"
+	time "time"
+)
+
+func init() {
+	SchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_BinaryBuildRequestOptions_To_api_BinaryBuildRequestOptions, + Convert_api_BinaryBuildRequestOptions_To_v1_BinaryBuildRequestOptions, + Convert_v1_BinaryBuildSource_To_api_BinaryBuildSource, + Convert_api_BinaryBuildSource_To_v1_BinaryBuildSource, + Convert_v1_Build_To_api_Build, + Convert_api_Build_To_v1_Build, + Convert_v1_BuildConfig_To_api_BuildConfig, + Convert_api_BuildConfig_To_v1_BuildConfig, + Convert_v1_BuildConfigList_To_api_BuildConfigList, + Convert_api_BuildConfigList_To_v1_BuildConfigList, + Convert_v1_BuildConfigSpec_To_api_BuildConfigSpec, + Convert_api_BuildConfigSpec_To_v1_BuildConfigSpec, + Convert_v1_BuildConfigStatus_To_api_BuildConfigStatus, + Convert_api_BuildConfigStatus_To_v1_BuildConfigStatus, + Convert_v1_BuildList_To_api_BuildList, + Convert_api_BuildList_To_v1_BuildList, + Convert_v1_BuildLog_To_api_BuildLog, + Convert_api_BuildLog_To_v1_BuildLog, + Convert_v1_BuildLogOptions_To_api_BuildLogOptions, + Convert_api_BuildLogOptions_To_v1_BuildLogOptions, + Convert_v1_BuildOutput_To_api_BuildOutput, + Convert_api_BuildOutput_To_v1_BuildOutput, + Convert_v1_BuildPostCommitSpec_To_api_BuildPostCommitSpec, + Convert_api_BuildPostCommitSpec_To_v1_BuildPostCommitSpec, + Convert_v1_BuildRequest_To_api_BuildRequest, + Convert_api_BuildRequest_To_v1_BuildRequest, + Convert_v1_BuildSource_To_api_BuildSource, + Convert_api_BuildSource_To_v1_BuildSource, + Convert_v1_BuildSpec_To_api_BuildSpec, + Convert_api_BuildSpec_To_v1_BuildSpec, + Convert_v1_BuildStatus_To_api_BuildStatus, + Convert_api_BuildStatus_To_v1_BuildStatus, + Convert_v1_BuildStrategy_To_api_BuildStrategy, + Convert_api_BuildStrategy_To_v1_BuildStrategy, + Convert_v1_BuildTriggerCause_To_api_BuildTriggerCause, + Convert_api_BuildTriggerCause_To_v1_BuildTriggerCause, + Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy, + Convert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy, + Convert_v1_CommonSpec_To_api_CommonSpec, + Convert_api_CommonSpec_To_v1_CommonSpec, + Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy, + Convert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy, + Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy, + Convert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy, + Convert_v1_GenericWebHookCause_To_api_GenericWebHookCause, + Convert_api_GenericWebHookCause_To_v1_GenericWebHookCause, + Convert_v1_GenericWebHookEvent_To_api_GenericWebHookEvent, + Convert_api_GenericWebHookEvent_To_v1_GenericWebHookEvent, + Convert_v1_GitBuildSource_To_api_GitBuildSource, + Convert_api_GitBuildSource_To_v1_GitBuildSource, + Convert_v1_GitHubWebHookCause_To_api_GitHubWebHookCause, + Convert_api_GitHubWebHookCause_To_v1_GitHubWebHookCause, + Convert_v1_GitInfo_To_api_GitInfo, + Convert_api_GitInfo_To_v1_GitInfo, + Convert_v1_GitSourceRevision_To_api_GitSourceRevision, + Convert_api_GitSourceRevision_To_v1_GitSourceRevision, + Convert_v1_ImageChangeCause_To_api_ImageChangeCause, + Convert_api_ImageChangeCause_To_v1_ImageChangeCause, + Convert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger, + Convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger, + Convert_v1_ImageSource_To_api_ImageSource, + Convert_api_ImageSource_To_v1_ImageSource, + Convert_v1_ImageSourcePath_To_api_ImageSourcePath, + Convert_api_ImageSourcePath_To_v1_ImageSourcePath, + Convert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy, + 
Convert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy, + Convert_v1_SecretBuildSource_To_api_SecretBuildSource, + Convert_api_SecretBuildSource_To_v1_SecretBuildSource, + Convert_v1_SecretSpec_To_api_SecretSpec, + Convert_api_SecretSpec_To_v1_SecretSpec, + Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy, + Convert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy, + Convert_v1_SourceControlUser_To_api_SourceControlUser, + Convert_api_SourceControlUser_To_v1_SourceControlUser, + Convert_v1_SourceRevision_To_api_SourceRevision, + Convert_api_SourceRevision_To_v1_SourceRevision, + Convert_v1_WebHookTrigger_To_api_WebHookTrigger, + Convert_api_WebHookTrigger_To_v1_WebHookTrigger, + ) +} + +func autoConvert_v1_BinaryBuildRequestOptions_To_api_BinaryBuildRequestOptions(in *BinaryBuildRequestOptions, out *api.BinaryBuildRequestOptions, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + out.AsFile = in.AsFile + out.Commit = in.Commit + out.Message = in.Message + out.AuthorName = in.AuthorName + out.AuthorEmail = in.AuthorEmail + out.CommitterName = in.CommitterName + out.CommitterEmail = in.CommitterEmail + return nil +} + +func Convert_v1_BinaryBuildRequestOptions_To_api_BinaryBuildRequestOptions(in *BinaryBuildRequestOptions, out *api.BinaryBuildRequestOptions, s conversion.Scope) error { + return autoConvert_v1_BinaryBuildRequestOptions_To_api_BinaryBuildRequestOptions(in, out, s) +} + +func autoConvert_api_BinaryBuildRequestOptions_To_v1_BinaryBuildRequestOptions(in *api.BinaryBuildRequestOptions, out *BinaryBuildRequestOptions, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + out.AsFile = in.AsFile + out.Commit = in.Commit + out.Message = in.Message + out.AuthorName = in.AuthorName + out.AuthorEmail = in.AuthorEmail + out.CommitterName = in.CommitterName + out.CommitterEmail = in.CommitterEmail + return nil +} + +func Convert_api_BinaryBuildRequestOptions_To_v1_BinaryBuildRequestOptions(in *api.BinaryBuildRequestOptions, out *BinaryBuildRequestOptions, s conversion.Scope) error { + return autoConvert_api_BinaryBuildRequestOptions_To_v1_BinaryBuildRequestOptions(in, out, s) +} + +func autoConvert_v1_BinaryBuildSource_To_api_BinaryBuildSource(in *BinaryBuildSource, out *api.BinaryBuildSource, s conversion.Scope) error { + out.AsFile = in.AsFile + return nil +} + +func Convert_v1_BinaryBuildSource_To_api_BinaryBuildSource(in *BinaryBuildSource, out *api.BinaryBuildSource, s conversion.Scope) error { + return autoConvert_v1_BinaryBuildSource_To_api_BinaryBuildSource(in, out, s) +} + +func autoConvert_api_BinaryBuildSource_To_v1_BinaryBuildSource(in *api.BinaryBuildSource, out *BinaryBuildSource, s conversion.Scope) error { + out.AsFile = in.AsFile + return nil +} + +func Convert_api_BinaryBuildSource_To_v1_BinaryBuildSource(in *api.BinaryBuildSource, out *BinaryBuildSource, s conversion.Scope) error { + return autoConvert_api_BinaryBuildSource_To_v1_BinaryBuildSource(in, out, s) +} + +func autoConvert_v1_Build_To_api_Build(in *Build, out *api.Build, s conversion.Scope) error { + if err := 
pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_BuildSpec_To_api_BuildSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_BuildStatus_To_api_BuildStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Build_To_api_Build(in *Build, out *api.Build, s conversion.Scope) error { + return autoConvert_v1_Build_To_api_Build(in, out, s) +} + +func autoConvert_api_Build_To_v1_Build(in *api.Build, out *Build, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_BuildSpec_To_v1_BuildSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_BuildStatus_To_v1_BuildStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Build_To_v1_Build(in *api.Build, out *Build, s conversion.Scope) error { + return autoConvert_api_Build_To_v1_Build(in, out, s) +} + +func autoConvert_v1_BuildConfig_To_api_BuildConfig(in *BuildConfig, out *api.BuildConfig, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_v1_BuildConfigSpec_To_api_BuildConfigSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_BuildConfigStatus_To_api_BuildConfigStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_BuildConfig_To_v1_BuildConfig(in *api.BuildConfig, out *BuildConfig, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if err := Convert_api_BuildConfigSpec_To_v1_BuildConfigSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_BuildConfigStatus_To_v1_BuildConfigStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_BuildConfig_To_v1_BuildConfig(in *api.BuildConfig, out *BuildConfig, s conversion.Scope) error { + return autoConvert_api_BuildConfig_To_v1_BuildConfig(in, out, s) +} + +func autoConvert_v1_BuildConfigList_To_api_BuildConfigList(in *BuildConfigList, out *api.BuildConfigList, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := pkg_api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.BuildConfig, len(*in)) + for i := range *in { + if err := Convert_v1_BuildConfig_To_api_BuildConfig(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func 
Convert_v1_BuildConfigList_To_api_BuildConfigList(in *BuildConfigList, out *api.BuildConfigList, s conversion.Scope) error { + return autoConvert_v1_BuildConfigList_To_api_BuildConfigList(in, out, s) +} + +func autoConvert_api_BuildConfigList_To_v1_BuildConfigList(in *api.BuildConfigList, out *BuildConfigList, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := pkg_api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + if err := Convert_api_BuildConfig_To_v1_BuildConfig(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_BuildConfigList_To_v1_BuildConfigList(in *api.BuildConfigList, out *BuildConfigList, s conversion.Scope) error { + return autoConvert_api_BuildConfigList_To_v1_BuildConfigList(in, out, s) +} + +func autoConvert_v1_BuildConfigSpec_To_api_BuildConfigSpec(in *BuildConfigSpec, out *api.BuildConfigSpec, s conversion.Scope) error { + SetDefaults_BuildConfigSpec(in) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]api.BuildTriggerPolicy, len(*in)) + for i := range *in { + if err := Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Triggers = nil + } + out.RunPolicy = api.BuildRunPolicy(in.RunPolicy) + if err := Convert_v1_CommonSpec_To_api_CommonSpec(&in.CommonSpec, &out.CommonSpec, s); err != nil { + return err + } + return nil +} + +func Convert_v1_BuildConfigSpec_To_api_BuildConfigSpec(in *BuildConfigSpec, out *api.BuildConfigSpec, s conversion.Scope) error { + return autoConvert_v1_BuildConfigSpec_To_api_BuildConfigSpec(in, out, s) +} + +func autoConvert_api_BuildConfigSpec_To_v1_BuildConfigSpec(in *api.BuildConfigSpec, out *BuildConfigSpec, s conversion.Scope) error { + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + if err := Convert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Triggers = nil + } + out.RunPolicy = BuildRunPolicy(in.RunPolicy) + if err := Convert_api_CommonSpec_To_v1_CommonSpec(&in.CommonSpec, &out.CommonSpec, s); err != nil { + return err + } + return nil +} + +func Convert_api_BuildConfigSpec_To_v1_BuildConfigSpec(in *api.BuildConfigSpec, out *BuildConfigSpec, s conversion.Scope) error { + return autoConvert_api_BuildConfigSpec_To_v1_BuildConfigSpec(in, out, s) +} + +func autoConvert_v1_BuildConfigStatus_To_api_BuildConfigStatus(in *BuildConfigStatus, out *api.BuildConfigStatus, s conversion.Scope) error { + out.LastVersion = in.LastVersion + return nil +} + +func Convert_v1_BuildConfigStatus_To_api_BuildConfigStatus(in *BuildConfigStatus, out *api.BuildConfigStatus, s conversion.Scope) error { + return autoConvert_v1_BuildConfigStatus_To_api_BuildConfigStatus(in, out, s) +} + +func autoConvert_api_BuildConfigStatus_To_v1_BuildConfigStatus(in *api.BuildConfigStatus, out *BuildConfigStatus, s conversion.Scope) error { + out.LastVersion = in.LastVersion + return nil +} + +func Convert_api_BuildConfigStatus_To_v1_BuildConfigStatus(in *api.BuildConfigStatus, out *BuildConfigStatus, s 
conversion.Scope) error { + return autoConvert_api_BuildConfigStatus_To_v1_BuildConfigStatus(in, out, s) +} + +func autoConvert_v1_BuildList_To_api_BuildList(in *BuildList, out *api.BuildList, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := pkg_api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Build, len(*in)) + for i := range *in { + if err := Convert_v1_Build_To_api_Build(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_BuildList_To_api_BuildList(in *BuildList, out *api.BuildList, s conversion.Scope) error { + return autoConvert_v1_BuildList_To_api_BuildList(in, out, s) +} + +func autoConvert_api_BuildList_To_v1_BuildList(in *api.BuildList, out *BuildList, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := pkg_api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { + return err + } + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + if err := Convert_api_Build_To_v1_Build(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_api_BuildList_To_v1_BuildList(in *api.BuildList, out *BuildList, s conversion.Scope) error { + return autoConvert_api_BuildList_To_v1_BuildList(in, out, s) +} + +func autoConvert_v1_BuildLog_To_api_BuildLog(in *BuildLog, out *api.BuildLog, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + return nil +} + +func Convert_v1_BuildLog_To_api_BuildLog(in *BuildLog, out *api.BuildLog, s conversion.Scope) error { + return autoConvert_v1_BuildLog_To_api_BuildLog(in, out, s) +} + +func autoConvert_api_BuildLog_To_v1_BuildLog(in *api.BuildLog, out *BuildLog, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + return nil +} + +func Convert_api_BuildLog_To_v1_BuildLog(in *api.BuildLog, out *BuildLog, s conversion.Scope) error { + return autoConvert_api_BuildLog_To_v1_BuildLog(in, out, s) +} + +func autoConvert_v1_BuildLogOptions_To_api_BuildLogOptions(in *BuildLogOptions, out *api.BuildLogOptions, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = in.SinceSeconds + out.SinceTime = in.SinceTime + out.Timestamps = in.Timestamps + out.TailLines = in.TailLines + out.LimitBytes = in.LimitBytes + out.NoWait = in.NoWait + out.Version = in.Version + return nil +} + +func Convert_v1_BuildLogOptions_To_api_BuildLogOptions(in *BuildLogOptions, out *api.BuildLogOptions, s conversion.Scope) error { + return autoConvert_v1_BuildLogOptions_To_api_BuildLogOptions(in, out, s) +} + +func autoConvert_api_BuildLogOptions_To_v1_BuildLogOptions(in *api.BuildLogOptions, out *BuildLogOptions, s conversion.Scope) 
error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = in.SinceSeconds + out.SinceTime = in.SinceTime + out.Timestamps = in.Timestamps + out.TailLines = in.TailLines + out.LimitBytes = in.LimitBytes + out.NoWait = in.NoWait + out.Version = in.Version + return nil +} + +func Convert_api_BuildLogOptions_To_v1_BuildLogOptions(in *api.BuildLogOptions, out *BuildLogOptions, s conversion.Scope) error { + return autoConvert_api_BuildLogOptions_To_v1_BuildLogOptions(in, out, s) +} + +func autoConvert_v1_BuildOutput_To_api_BuildOutput(in *BuildOutput, out *api.BuildOutput, s conversion.Scope) error { + if in.To != nil { + in, out := &in.To, &out.To + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.To = nil + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PushSecret = nil + } + return nil +} + +func autoConvert_api_BuildOutput_To_v1_BuildOutput(in *api.BuildOutput, out *BuildOutput, s conversion.Scope) error { + if in.To != nil { + in, out := &in.To, &out.To + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.To = nil + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PushSecret = nil + } + return nil +} + +func Convert_api_BuildOutput_To_v1_BuildOutput(in *api.BuildOutput, out *BuildOutput, s conversion.Scope) error { + return autoConvert_api_BuildOutput_To_v1_BuildOutput(in, out, s) +} + +func autoConvert_v1_BuildPostCommitSpec_To_api_BuildPostCommitSpec(in *BuildPostCommitSpec, out *api.BuildPostCommitSpec, s conversion.Scope) error { + out.Command = in.Command + out.Args = in.Args + out.Script = in.Script + return nil +} + +func Convert_v1_BuildPostCommitSpec_To_api_BuildPostCommitSpec(in *BuildPostCommitSpec, out *api.BuildPostCommitSpec, s conversion.Scope) error { + return autoConvert_v1_BuildPostCommitSpec_To_api_BuildPostCommitSpec(in, out, s) +} + +func autoConvert_api_BuildPostCommitSpec_To_v1_BuildPostCommitSpec(in *api.BuildPostCommitSpec, out *BuildPostCommitSpec, s conversion.Scope) error { + out.Command = in.Command + out.Args = in.Args + out.Script = in.Script + return nil +} + +func Convert_api_BuildPostCommitSpec_To_v1_BuildPostCommitSpec(in *api.BuildPostCommitSpec, out *BuildPostCommitSpec, s conversion.Scope) error { + return autoConvert_api_BuildPostCommitSpec_To_v1_BuildPostCommitSpec(in, out, s) +} + +func autoConvert_v1_BuildRequest_To_api_BuildRequest(in *BuildRequest, out *api.BuildRequest, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, 
&out.Revision + *out = new(api.SourceRevision) + if err := Convert_v1_SourceRevision_To_api_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.TriggeredByImage = nil + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.From = nil + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(api.BinaryBuildSource) + if err := Convert_v1_BinaryBuildSource_To_api_BinaryBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Binary = nil + } + out.LastVersion = in.LastVersion + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]pkg_api.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]api.BuildTriggerCause, len(*in)) + for i := range *in { + if err := Convert_v1_BuildTriggerCause_To_api_BuildTriggerCause(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil +} + +func Convert_v1_BuildRequest_To_api_BuildRequest(in *BuildRequest, out *api.BuildRequest, s conversion.Scope) error { + return autoConvert_v1_BuildRequest_To_api_BuildRequest(in, out, s) +} + +func autoConvert_api_BuildRequest_To_v1_BuildRequest(in *api.BuildRequest, out *BuildRequest, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + if err := api_v1.Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := Convert_api_SourceRevision_To_v1_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.TriggeredByImage = nil + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.From = nil + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + if err := Convert_api_BinaryBuildSource_To_v1_BinaryBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Binary = nil + } + out.LastVersion = in.LastVersion + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = 
make([]BuildTriggerCause, len(*in)) + for i := range *in { + if err := Convert_api_BuildTriggerCause_To_v1_BuildTriggerCause(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil +} + +func Convert_api_BuildRequest_To_v1_BuildRequest(in *api.BuildRequest, out *BuildRequest, s conversion.Scope) error { + return autoConvert_api_BuildRequest_To_v1_BuildRequest(in, out, s) +} + +func autoConvert_v1_BuildSource_To_api_BuildSource(in *BuildSource, out *api.BuildSource, s conversion.Scope) error { + SetDefaults_BuildSource(in) + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(api.BinaryBuildSource) + if err := Convert_v1_BinaryBuildSource_To_api_BinaryBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Binary = nil + } + out.Dockerfile = in.Dockerfile + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(api.GitBuildSource) + if err := Convert_v1_GitBuildSource_To_api_GitBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]api.ImageSource, len(*in)) + for i := range *in { + if err := Convert_v1_ImageSource_To_api_ImageSource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Images = nil + } + out.ContextDir = in.ContextDir + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.SourceSecret = nil + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]api.SecretBuildSource, len(*in)) + for i := range *in { + if err := Convert_v1_SecretBuildSource_To_api_SecretBuildSource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Secrets = nil + } + return nil +} + +func Convert_v1_BuildSource_To_api_BuildSource(in *BuildSource, out *api.BuildSource, s conversion.Scope) error { + return autoConvert_v1_BuildSource_To_api_BuildSource(in, out, s) +} + +func autoConvert_api_BuildSource_To_v1_BuildSource(in *api.BuildSource, out *BuildSource, s conversion.Scope) error { + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + if err := Convert_api_BinaryBuildSource_To_v1_BinaryBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Binary = nil + } + out.Dockerfile = in.Dockerfile + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + if err := Convert_api_GitBuildSource_To_v1_GitBuildSource(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + if err := Convert_api_ImageSource_To_v1_ImageSource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Images = nil + } + out.ContextDir = in.ContextDir + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.SourceSecret = nil + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + for i := range *in { + if err := 
Convert_api_SecretBuildSource_To_v1_SecretBuildSource(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Secrets = nil + } + return nil +} + +func autoConvert_v1_BuildSpec_To_api_BuildSpec(in *BuildSpec, out *api.BuildSpec, s conversion.Scope) error { + if err := Convert_v1_CommonSpec_To_api_CommonSpec(&in.CommonSpec, &out.CommonSpec, s); err != nil { + return err + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]api.BuildTriggerCause, len(*in)) + for i := range *in { + if err := Convert_v1_BuildTriggerCause_To_api_BuildTriggerCause(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil +} + +func Convert_v1_BuildSpec_To_api_BuildSpec(in *BuildSpec, out *api.BuildSpec, s conversion.Scope) error { + return autoConvert_v1_BuildSpec_To_api_BuildSpec(in, out, s) +} + +func autoConvert_api_BuildSpec_To_v1_BuildSpec(in *api.BuildSpec, out *BuildSpec, s conversion.Scope) error { + if err := Convert_api_CommonSpec_To_v1_CommonSpec(&in.CommonSpec, &out.CommonSpec, s); err != nil { + return err + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + if err := Convert_api_BuildTriggerCause_To_v1_BuildTriggerCause(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil +} + +func Convert_api_BuildSpec_To_v1_BuildSpec(in *api.BuildSpec, out *BuildSpec, s conversion.Scope) error { + return autoConvert_api_BuildSpec_To_v1_BuildSpec(in, out, s) +} + +func autoConvert_v1_BuildStatus_To_api_BuildStatus(in *BuildStatus, out *api.BuildStatus, s conversion.Scope) error { + out.Phase = api.BuildPhase(in.Phase) + out.Cancelled = in.Cancelled + out.Reason = api.StatusReason(in.Reason) + out.Message = in.Message + out.StartTimestamp = in.StartTimestamp + out.CompletionTimestamp = in.CompletionTimestamp + out.Duration = time.Duration(in.Duration) + out.OutputDockerImageReference = in.OutputDockerImageReference + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.Config = nil + } + return nil +} + +func Convert_v1_BuildStatus_To_api_BuildStatus(in *BuildStatus, out *api.BuildStatus, s conversion.Scope) error { + return autoConvert_v1_BuildStatus_To_api_BuildStatus(in, out, s) +} + +func autoConvert_api_BuildStatus_To_v1_BuildStatus(in *api.BuildStatus, out *BuildStatus, s conversion.Scope) error { + out.Phase = BuildPhase(in.Phase) + out.Cancelled = in.Cancelled + out.Reason = StatusReason(in.Reason) + out.Message = in.Message + out.StartTimestamp = in.StartTimestamp + out.CompletionTimestamp = in.CompletionTimestamp + out.Duration = time.Duration(in.Duration) + out.OutputDockerImageReference = in.OutputDockerImageReference + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.Config = nil + } + return nil +} + +func Convert_api_BuildStatus_To_v1_BuildStatus(in *api.BuildStatus, out *BuildStatus, s conversion.Scope) error { + return autoConvert_api_BuildStatus_To_v1_BuildStatus(in, out, s) +} + +func autoConvert_v1_BuildStrategy_To_api_BuildStrategy(in *BuildStrategy, out *api.BuildStrategy, s 
conversion.Scope) error { + SetDefaults_BuildStrategy(in) + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(api.DockerBuildStrategy) + if err := Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.DockerStrategy = nil + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(api.SourceBuildStrategy) + if err := Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.SourceStrategy = nil + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(api.CustomBuildStrategy) + if err := Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.CustomStrategy = nil + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(api.JenkinsPipelineBuildStrategy) + if err := Convert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.JenkinsPipelineStrategy = nil + } + return nil +} + +func Convert_v1_BuildStrategy_To_api_BuildStrategy(in *BuildStrategy, out *api.BuildStrategy, s conversion.Scope) error { + return autoConvert_v1_BuildStrategy_To_api_BuildStrategy(in, out, s) +} + +func autoConvert_api_BuildStrategy_To_v1_BuildStrategy(in *api.BuildStrategy, out *BuildStrategy, s conversion.Scope) error { + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + if err := Convert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.DockerStrategy = nil + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + if err := Convert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.SourceStrategy = nil + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + if err := Convert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.CustomStrategy = nil + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + if err := Convert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.JenkinsPipelineStrategy = nil + } + return nil +} + +func autoConvert_v1_BuildTriggerCause_To_api_BuildTriggerCause(in *BuildTriggerCause, out *api.BuildTriggerCause, s conversion.Scope) error { + out.Message = in.Message + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(api.GenericWebHookCause) + if err := Convert_v1_GenericWebHookCause_To_api_GenericWebHookCause(*in, *out, s); err != nil { + return err + } + } else { + out.GenericWebHook = nil + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(api.GitHubWebHookCause) + if err := Convert_v1_GitHubWebHookCause_To_api_GitHubWebHookCause(*in, *out, s); err != nil { + return err + } + } else { + out.GitHubWebHook = nil + } + if in.ImageChangeBuild != nil { + in, out := 
&in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(api.ImageChangeCause) + if err := Convert_v1_ImageChangeCause_To_api_ImageChangeCause(*in, *out, s); err != nil { + return err + } + } else { + out.ImageChangeBuild = nil + } + return nil +} + +func Convert_v1_BuildTriggerCause_To_api_BuildTriggerCause(in *BuildTriggerCause, out *api.BuildTriggerCause, s conversion.Scope) error { + return autoConvert_v1_BuildTriggerCause_To_api_BuildTriggerCause(in, out, s) +} + +func autoConvert_api_BuildTriggerCause_To_v1_BuildTriggerCause(in *api.BuildTriggerCause, out *BuildTriggerCause, s conversion.Scope) error { + out.Message = in.Message + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + if err := Convert_api_GenericWebHookCause_To_v1_GenericWebHookCause(*in, *out, s); err != nil { + return err + } + } else { + out.GenericWebHook = nil + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + if err := Convert_api_GitHubWebHookCause_To_v1_GitHubWebHookCause(*in, *out, s); err != nil { + return err + } + } else { + out.GitHubWebHook = nil + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + if err := Convert_api_ImageChangeCause_To_v1_ImageChangeCause(*in, *out, s); err != nil { + return err + } + } else { + out.ImageChangeBuild = nil + } + return nil +} + +func Convert_api_BuildTriggerCause_To_v1_BuildTriggerCause(in *api.BuildTriggerCause, out *BuildTriggerCause, s conversion.Scope) error { + return autoConvert_api_BuildTriggerCause_To_v1_BuildTriggerCause(in, out, s) +} + +func autoConvert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(in *BuildTriggerPolicy, out *api.BuildTriggerPolicy, s conversion.Scope) error { + SetDefaults_BuildTriggerPolicy(in) + out.Type = api.BuildTriggerType(in.Type) + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(api.WebHookTrigger) + if err := Convert_v1_WebHookTrigger_To_api_WebHookTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.GitHubWebHook = nil + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(api.WebHookTrigger) + if err := Convert_v1_WebHookTrigger_To_api_WebHookTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.GenericWebHook = nil + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(api.ImageChangeTrigger) + if err := Convert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.ImageChange = nil + } + return nil +} + +func autoConvert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy(in *api.BuildTriggerPolicy, out *BuildTriggerPolicy, s conversion.Scope) error { + out.Type = BuildTriggerType(in.Type) + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + if err := Convert_api_WebHookTrigger_To_v1_WebHookTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.GitHubWebHook = nil + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + if err := Convert_api_WebHookTrigger_To_v1_WebHookTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.GenericWebHook = nil + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + 
if err := Convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(*in, *out, s); err != nil { + return err + } + } else { + out.ImageChange = nil + } + return nil +} + +func Convert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy(in *api.BuildTriggerPolicy, out *BuildTriggerPolicy, s conversion.Scope) error { + return autoConvert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy(in, out, s) +} + +func autoConvert_v1_CommonSpec_To_api_CommonSpec(in *CommonSpec, out *api.CommonSpec, s conversion.Scope) error { + out.ServiceAccount = in.ServiceAccount + if err := Convert_v1_BuildSource_To_api_BuildSource(&in.Source, &out.Source, s); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(api.SourceRevision) + if err := Convert_v1_SourceRevision_To_api_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + if err := Convert_v1_BuildStrategy_To_api_BuildStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if err := Convert_v1_BuildOutput_To_api_BuildOutput(&in.Output, &out.Output, s); err != nil { + return err + } + if err := api_v1.Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + if err := Convert_v1_BuildPostCommitSpec_To_api_BuildPostCommitSpec(&in.PostCommit, &out.PostCommit, s); err != nil { + return err + } + out.CompletionDeadlineSeconds = in.CompletionDeadlineSeconds + return nil +} + +func Convert_v1_CommonSpec_To_api_CommonSpec(in *CommonSpec, out *api.CommonSpec, s conversion.Scope) error { + return autoConvert_v1_CommonSpec_To_api_CommonSpec(in, out, s) +} + +func autoConvert_api_CommonSpec_To_v1_CommonSpec(in *api.CommonSpec, out *CommonSpec, s conversion.Scope) error { + out.ServiceAccount = in.ServiceAccount + if err := Convert_api_BuildSource_To_v1_BuildSource(&in.Source, &out.Source, s); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := Convert_api_SourceRevision_To_v1_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + if err := Convert_api_BuildStrategy_To_v1_BuildStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if err := Convert_api_BuildOutput_To_v1_BuildOutput(&in.Output, &out.Output, s); err != nil { + return err + } + if err := api_v1.Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + if err := Convert_api_BuildPostCommitSpec_To_v1_BuildPostCommitSpec(&in.PostCommit, &out.PostCommit, s); err != nil { + return err + } + out.CompletionDeadlineSeconds = in.CompletionDeadlineSeconds + return nil +} + +func Convert_api_CommonSpec_To_v1_CommonSpec(in *api.CommonSpec, out *CommonSpec, s conversion.Scope) error { + return autoConvert_api_CommonSpec_To_v1_CommonSpec(in, out, s) +} + +func autoConvert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(in *CustomBuildStrategy, out *api.CustomBuildStrategy, s conversion.Scope) error { + SetDefaults_CustomBuildStrategy(in) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil 
+ } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]pkg_api.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ExposeDockerSocket = in.ExposeDockerSocket + out.ForcePull = in.ForcePull + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]api.SecretSpec, len(*in)) + for i := range *in { + if err := Convert_v1_SecretSpec_To_api_SecretSpec(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Secrets = nil + } + out.BuildAPIVersion = in.BuildAPIVersion + return nil +} + +func autoConvert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy(in *api.CustomBuildStrategy, out *CustomBuildStrategy, s conversion.Scope) error { + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ExposeDockerSocket = in.ExposeDockerSocket + out.ForcePull = in.ForcePull + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + for i := range *in { + if err := Convert_api_SecretSpec_To_v1_SecretSpec(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Secrets = nil + } + out.BuildAPIVersion = in.BuildAPIVersion + return nil +} + +func Convert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy(in *api.CustomBuildStrategy, out *CustomBuildStrategy, s conversion.Scope) error { + return autoConvert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy(in, out, s) +} + +func autoConvert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(in *DockerBuildStrategy, out *api.DockerBuildStrategy, s conversion.Scope) error { + SetDefaults_DockerBuildStrategy(in) + if in.From != nil { + in, out := &in.From, &out.From + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.From = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + out.NoCache = in.NoCache + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]pkg_api.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ForcePull = in.ForcePull + out.DockerfilePath = in.DockerfilePath + return nil +} + +func autoConvert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy(in *api.DockerBuildStrategy, out *DockerBuildStrategy, s conversion.Scope) error { + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); 
err != nil { + return err + } + } else { + out.From = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + out.NoCache = in.NoCache + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ForcePull = in.ForcePull + out.DockerfilePath = in.DockerfilePath + return nil +} + +func Convert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy(in *api.DockerBuildStrategy, out *DockerBuildStrategy, s conversion.Scope) error { + return autoConvert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy(in, out, s) +} + +func autoConvert_v1_GenericWebHookCause_To_api_GenericWebHookCause(in *GenericWebHookCause, out *api.GenericWebHookCause, s conversion.Scope) error { + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(api.SourceRevision) + if err := Convert_v1_SourceRevision_To_api_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil +} + +func Convert_v1_GenericWebHookCause_To_api_GenericWebHookCause(in *GenericWebHookCause, out *api.GenericWebHookCause, s conversion.Scope) error { + return autoConvert_v1_GenericWebHookCause_To_api_GenericWebHookCause(in, out, s) +} + +func autoConvert_api_GenericWebHookCause_To_v1_GenericWebHookCause(in *api.GenericWebHookCause, out *GenericWebHookCause, s conversion.Scope) error { + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := Convert_api_SourceRevision_To_v1_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil +} + +func Convert_api_GenericWebHookCause_To_v1_GenericWebHookCause(in *api.GenericWebHookCause, out *GenericWebHookCause, s conversion.Scope) error { + return autoConvert_api_GenericWebHookCause_To_v1_GenericWebHookCause(in, out, s) +} + +func autoConvert_v1_GenericWebHookEvent_To_api_GenericWebHookEvent(in *GenericWebHookEvent, out *api.GenericWebHookEvent, s conversion.Scope) error { + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(api.GitInfo) + if err := Convert_v1_GitInfo_To_api_GitInfo(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]pkg_api.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + return nil +} + +func Convert_v1_GenericWebHookEvent_To_api_GenericWebHookEvent(in *GenericWebHookEvent, out *api.GenericWebHookEvent, s conversion.Scope) error { + return autoConvert_v1_GenericWebHookEvent_To_api_GenericWebHookEvent(in, out, s) +} + +func autoConvert_api_GenericWebHookEvent_To_v1_GenericWebHookEvent(in *api.GenericWebHookEvent, out *GenericWebHookEvent, s conversion.Scope) error { + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitInfo) + if err := Convert_api_GitInfo_To_v1_GitInfo(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + 
*out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + return nil +} + +func Convert_api_GenericWebHookEvent_To_v1_GenericWebHookEvent(in *api.GenericWebHookEvent, out *GenericWebHookEvent, s conversion.Scope) error { + return autoConvert_api_GenericWebHookEvent_To_v1_GenericWebHookEvent(in, out, s) +} + +func autoConvert_v1_GitBuildSource_To_api_GitBuildSource(in *GitBuildSource, out *api.GitBuildSource, s conversion.Scope) error { + out.URI = in.URI + out.Ref = in.Ref + out.HTTPProxy = in.HTTPProxy + out.HTTPSProxy = in.HTTPSProxy + return nil +} + +func Convert_v1_GitBuildSource_To_api_GitBuildSource(in *GitBuildSource, out *api.GitBuildSource, s conversion.Scope) error { + return autoConvert_v1_GitBuildSource_To_api_GitBuildSource(in, out, s) +} + +func autoConvert_api_GitBuildSource_To_v1_GitBuildSource(in *api.GitBuildSource, out *GitBuildSource, s conversion.Scope) error { + out.URI = in.URI + out.Ref = in.Ref + out.HTTPProxy = in.HTTPProxy + out.HTTPSProxy = in.HTTPSProxy + return nil +} + +func Convert_api_GitBuildSource_To_v1_GitBuildSource(in *api.GitBuildSource, out *GitBuildSource, s conversion.Scope) error { + return autoConvert_api_GitBuildSource_To_v1_GitBuildSource(in, out, s) +} + +func autoConvert_v1_GitHubWebHookCause_To_api_GitHubWebHookCause(in *GitHubWebHookCause, out *api.GitHubWebHookCause, s conversion.Scope) error { + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(api.SourceRevision) + if err := Convert_v1_SourceRevision_To_api_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil +} + +func Convert_v1_GitHubWebHookCause_To_api_GitHubWebHookCause(in *GitHubWebHookCause, out *api.GitHubWebHookCause, s conversion.Scope) error { + return autoConvert_v1_GitHubWebHookCause_To_api_GitHubWebHookCause(in, out, s) +} + +func autoConvert_api_GitHubWebHookCause_To_v1_GitHubWebHookCause(in *api.GitHubWebHookCause, out *GitHubWebHookCause, s conversion.Scope) error { + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := Convert_api_SourceRevision_To_v1_SourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil +} + +func Convert_api_GitHubWebHookCause_To_v1_GitHubWebHookCause(in *api.GitHubWebHookCause, out *GitHubWebHookCause, s conversion.Scope) error { + return autoConvert_api_GitHubWebHookCause_To_v1_GitHubWebHookCause(in, out, s) +} + +func autoConvert_v1_GitInfo_To_api_GitInfo(in *GitInfo, out *api.GitInfo, s conversion.Scope) error { + if err := Convert_v1_GitBuildSource_To_api_GitBuildSource(&in.GitBuildSource, &out.GitBuildSource, s); err != nil { + return err + } + if err := Convert_v1_GitSourceRevision_To_api_GitSourceRevision(&in.GitSourceRevision, &out.GitSourceRevision, s); err != nil { + return err + } + return nil +} + +func Convert_v1_GitInfo_To_api_GitInfo(in *GitInfo, out *api.GitInfo, s conversion.Scope) error { + return autoConvert_v1_GitInfo_To_api_GitInfo(in, out, s) +} + +func autoConvert_api_GitInfo_To_v1_GitInfo(in *api.GitInfo, out *GitInfo, s conversion.Scope) error { + if err := Convert_api_GitBuildSource_To_v1_GitBuildSource(&in.GitBuildSource, &out.GitBuildSource, s); err != nil { + return err + } + if err := 
Convert_api_GitSourceRevision_To_v1_GitSourceRevision(&in.GitSourceRevision, &out.GitSourceRevision, s); err != nil { + return err + } + return nil +} + +func Convert_api_GitInfo_To_v1_GitInfo(in *api.GitInfo, out *GitInfo, s conversion.Scope) error { + return autoConvert_api_GitInfo_To_v1_GitInfo(in, out, s) +} + +func autoConvert_v1_GitSourceRevision_To_api_GitSourceRevision(in *GitSourceRevision, out *api.GitSourceRevision, s conversion.Scope) error { + out.Commit = in.Commit + if err := Convert_v1_SourceControlUser_To_api_SourceControlUser(&in.Author, &out.Author, s); err != nil { + return err + } + if err := Convert_v1_SourceControlUser_To_api_SourceControlUser(&in.Committer, &out.Committer, s); err != nil { + return err + } + out.Message = in.Message + return nil +} + +func Convert_v1_GitSourceRevision_To_api_GitSourceRevision(in *GitSourceRevision, out *api.GitSourceRevision, s conversion.Scope) error { + return autoConvert_v1_GitSourceRevision_To_api_GitSourceRevision(in, out, s) +} + +func autoConvert_api_GitSourceRevision_To_v1_GitSourceRevision(in *api.GitSourceRevision, out *GitSourceRevision, s conversion.Scope) error { + out.Commit = in.Commit + if err := Convert_api_SourceControlUser_To_v1_SourceControlUser(&in.Author, &out.Author, s); err != nil { + return err + } + if err := Convert_api_SourceControlUser_To_v1_SourceControlUser(&in.Committer, &out.Committer, s); err != nil { + return err + } + out.Message = in.Message + return nil +} + +func Convert_api_GitSourceRevision_To_v1_GitSourceRevision(in *api.GitSourceRevision, out *GitSourceRevision, s conversion.Scope) error { + return autoConvert_api_GitSourceRevision_To_v1_GitSourceRevision(in, out, s) +} + +func autoConvert_v1_ImageChangeCause_To_api_ImageChangeCause(in *ImageChangeCause, out *api.ImageChangeCause, s conversion.Scope) error { + out.ImageID = in.ImageID + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.FromRef = nil + } + return nil +} + +func Convert_v1_ImageChangeCause_To_api_ImageChangeCause(in *ImageChangeCause, out *api.ImageChangeCause, s conversion.Scope) error { + return autoConvert_v1_ImageChangeCause_To_api_ImageChangeCause(in, out, s) +} + +func autoConvert_api_ImageChangeCause_To_v1_ImageChangeCause(in *api.ImageChangeCause, out *ImageChangeCause, s conversion.Scope) error { + out.ImageID = in.ImageID + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.FromRef = nil + } + return nil +} + +func Convert_api_ImageChangeCause_To_v1_ImageChangeCause(in *api.ImageChangeCause, out *ImageChangeCause, s conversion.Scope) error { + return autoConvert_api_ImageChangeCause_To_v1_ImageChangeCause(in, out, s) +} + +func autoConvert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger(in *ImageChangeTrigger, out *api.ImageChangeTrigger, s conversion.Scope) error { + out.LastTriggeredImageID = in.LastTriggeredImageID + if in.From != nil { + in, out := &in.From, &out.From + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.From = nil + } + return nil +} + +func Convert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger(in *ImageChangeTrigger, out 
*api.ImageChangeTrigger, s conversion.Scope) error { + return autoConvert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger(in, out, s) +} + +func autoConvert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(in *api.ImageChangeTrigger, out *ImageChangeTrigger, s conversion.Scope) error { + out.LastTriggeredImageID = in.LastTriggeredImageID + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.From = nil + } + return nil +} + +func Convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(in *api.ImageChangeTrigger, out *ImageChangeTrigger, s conversion.Scope) error { + return autoConvert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(in, out, s) +} + +func autoConvert_v1_ImageSource_To_api_ImageSource(in *ImageSource, out *api.ImageSource, s conversion.Scope) error { + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]api.ImageSourcePath, len(*in)) + for i := range *in { + if err := Convert_v1_ImageSourcePath_To_api_ImageSourcePath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Paths = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + return nil +} + +func Convert_v1_ImageSource_To_api_ImageSource(in *ImageSource, out *api.ImageSource, s conversion.Scope) error { + return autoConvert_v1_ImageSource_To_api_ImageSource(in, out, s) +} + +func autoConvert_api_ImageSource_To_v1_ImageSource(in *api.ImageSource, out *ImageSource, s conversion.Scope) error { + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + for i := range *in { + if err := Convert_api_ImageSourcePath_To_v1_ImageSourcePath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Paths = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + return nil +} + +func Convert_api_ImageSource_To_v1_ImageSource(in *api.ImageSource, out *ImageSource, s conversion.Scope) error { + return autoConvert_api_ImageSource_To_v1_ImageSource(in, out, s) +} + +func autoConvert_v1_ImageSourcePath_To_api_ImageSourcePath(in *ImageSourcePath, out *api.ImageSourcePath, s conversion.Scope) error { + out.SourcePath = in.SourcePath + out.DestinationDir = in.DestinationDir + return nil +} + +func Convert_v1_ImageSourcePath_To_api_ImageSourcePath(in *ImageSourcePath, out *api.ImageSourcePath, s conversion.Scope) error { + return autoConvert_v1_ImageSourcePath_To_api_ImageSourcePath(in, out, s) +} + +func autoConvert_api_ImageSourcePath_To_v1_ImageSourcePath(in *api.ImageSourcePath, out *ImageSourcePath, s conversion.Scope) error { + out.SourcePath = in.SourcePath + out.DestinationDir = in.DestinationDir + return nil +} + +func 
Convert_api_ImageSourcePath_To_v1_ImageSourcePath(in *api.ImageSourcePath, out *ImageSourcePath, s conversion.Scope) error { + return autoConvert_api_ImageSourcePath_To_v1_ImageSourcePath(in, out, s) +} + +func autoConvert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy(in *JenkinsPipelineBuildStrategy, out *api.JenkinsPipelineBuildStrategy, s conversion.Scope) error { + out.JenkinsfilePath = in.JenkinsfilePath + out.Jenkinsfile = in.Jenkinsfile + return nil +} + +func Convert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy(in *JenkinsPipelineBuildStrategy, out *api.JenkinsPipelineBuildStrategy, s conversion.Scope) error { + return autoConvert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy(in, out, s) +} + +func autoConvert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy(in *api.JenkinsPipelineBuildStrategy, out *JenkinsPipelineBuildStrategy, s conversion.Scope) error { + out.JenkinsfilePath = in.JenkinsfilePath + out.Jenkinsfile = in.Jenkinsfile + return nil +} + +func Convert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy(in *api.JenkinsPipelineBuildStrategy, out *JenkinsPipelineBuildStrategy, s conversion.Scope) error { + return autoConvert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy(in, out, s) +} + +func autoConvert_v1_SecretBuildSource_To_api_SecretBuildSource(in *SecretBuildSource, out *api.SecretBuildSource, s conversion.Scope) error { + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.Secret, &out.Secret, s); err != nil { + return err + } + out.DestinationDir = in.DestinationDir + return nil +} + +func Convert_v1_SecretBuildSource_To_api_SecretBuildSource(in *SecretBuildSource, out *api.SecretBuildSource, s conversion.Scope) error { + return autoConvert_v1_SecretBuildSource_To_api_SecretBuildSource(in, out, s) +} + +func autoConvert_api_SecretBuildSource_To_v1_SecretBuildSource(in *api.SecretBuildSource, out *SecretBuildSource, s conversion.Scope) error { + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.Secret, &out.Secret, s); err != nil { + return err + } + out.DestinationDir = in.DestinationDir + return nil +} + +func Convert_api_SecretBuildSource_To_v1_SecretBuildSource(in *api.SecretBuildSource, out *SecretBuildSource, s conversion.Scope) error { + return autoConvert_api_SecretBuildSource_To_v1_SecretBuildSource(in, out, s) +} + +func autoConvert_v1_SecretSpec_To_api_SecretSpec(in *SecretSpec, out *api.SecretSpec, s conversion.Scope) error { + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.SecretSource, &out.SecretSource, s); err != nil { + return err + } + out.MountPath = in.MountPath + return nil +} + +func Convert_v1_SecretSpec_To_api_SecretSpec(in *SecretSpec, out *api.SecretSpec, s conversion.Scope) error { + return autoConvert_v1_SecretSpec_To_api_SecretSpec(in, out, s) +} + +func autoConvert_api_SecretSpec_To_v1_SecretSpec(in *api.SecretSpec, out *SecretSpec, s conversion.Scope) error { + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.SecretSource, &out.SecretSource, s); err != nil { + return err + } + out.MountPath = in.MountPath + return nil +} + +func Convert_api_SecretSpec_To_v1_SecretSpec(in *api.SecretSpec, out *SecretSpec, s conversion.Scope) error { + return autoConvert_api_SecretSpec_To_v1_SecretSpec(in, out, s) +} + +func autoConvert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(in 
*SourceBuildStrategy, out *api.SourceBuildStrategy, s conversion.Scope) error { + SetDefaults_SourceBuildStrategy(in) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(pkg_api.LocalObjectReference) + if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]pkg_api.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.Scripts = in.Scripts + out.Incremental = in.Incremental + out.ForcePull = in.ForcePull + if in.RuntimeImage != nil { + in, out := &in.RuntimeImage, &out.RuntimeImage + *out = new(pkg_api.ObjectReference) + if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.RuntimeImage = nil + } + if in.RuntimeArtifacts != nil { + in, out := &in.RuntimeArtifacts, &out.RuntimeArtifacts + *out = make([]api.ImageSourcePath, len(*in)) + for i := range *in { + if err := Convert_v1_ImageSourcePath_To_api_ImageSourcePath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RuntimeArtifacts = nil + } + return nil +} + +func autoConvert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy(in *api.SourceBuildStrategy, out *SourceBuildStrategy, s conversion.Scope) error { + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(&in.From, &out.From, s); err != nil { + return err + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + if err := api_v1.Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.PullSecret = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.Scripts = in.Scripts + out.Incremental = in.Incremental + out.ForcePull = in.ForcePull + if in.RuntimeImage != nil { + in, out := &in.RuntimeImage, &out.RuntimeImage + *out = new(api_v1.ObjectReference) + if err := api_v1.Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { + return err + } + } else { + out.RuntimeImage = nil + } + if in.RuntimeArtifacts != nil { + in, out := &in.RuntimeArtifacts, &out.RuntimeArtifacts + *out = make([]ImageSourcePath, len(*in)) + for i := range *in { + if err := Convert_api_ImageSourcePath_To_v1_ImageSourcePath(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RuntimeArtifacts = nil + } + return nil +} + +func Convert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy(in *api.SourceBuildStrategy, out *SourceBuildStrategy, s conversion.Scope) error { + return autoConvert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy(in, out, s) +} + +func autoConvert_v1_SourceControlUser_To_api_SourceControlUser(in *SourceControlUser, out *api.SourceControlUser, s conversion.Scope) error { + out.Name = in.Name + out.Email = in.Email + return nil +} + +func Convert_v1_SourceControlUser_To_api_SourceControlUser(in *SourceControlUser, out 
*api.SourceControlUser, s conversion.Scope) error { + return autoConvert_v1_SourceControlUser_To_api_SourceControlUser(in, out, s) +} + +func autoConvert_api_SourceControlUser_To_v1_SourceControlUser(in *api.SourceControlUser, out *SourceControlUser, s conversion.Scope) error { + out.Name = in.Name + out.Email = in.Email + return nil +} + +func Convert_api_SourceControlUser_To_v1_SourceControlUser(in *api.SourceControlUser, out *SourceControlUser, s conversion.Scope) error { + return autoConvert_api_SourceControlUser_To_v1_SourceControlUser(in, out, s) +} + +func autoConvert_v1_SourceRevision_To_api_SourceRevision(in *SourceRevision, out *api.SourceRevision, s conversion.Scope) error { + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(api.GitSourceRevision) + if err := Convert_v1_GitSourceRevision_To_api_GitSourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + return nil +} + +func Convert_v1_SourceRevision_To_api_SourceRevision(in *SourceRevision, out *api.SourceRevision, s conversion.Scope) error { + return autoConvert_v1_SourceRevision_To_api_SourceRevision(in, out, s) +} + +func autoConvert_api_SourceRevision_To_v1_SourceRevision(in *api.SourceRevision, out *SourceRevision, s conversion.Scope) error { + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + if err := Convert_api_GitSourceRevision_To_v1_GitSourceRevision(*in, *out, s); err != nil { + return err + } + } else { + out.Git = nil + } + return nil +} + +func autoConvert_v1_WebHookTrigger_To_api_WebHookTrigger(in *WebHookTrigger, out *api.WebHookTrigger, s conversion.Scope) error { + out.Secret = in.Secret + out.AllowEnv = in.AllowEnv + return nil +} + +func Convert_v1_WebHookTrigger_To_api_WebHookTrigger(in *WebHookTrigger, out *api.WebHookTrigger, s conversion.Scope) error { + return autoConvert_v1_WebHookTrigger_To_api_WebHookTrigger(in, out, s) +} + +func autoConvert_api_WebHookTrigger_To_v1_WebHookTrigger(in *api.WebHookTrigger, out *WebHookTrigger, s conversion.Scope) error { + out.Secret = in.Secret + out.AllowEnv = in.AllowEnv + return nil +} + +func Convert_api_WebHookTrigger_To_v1_WebHookTrigger(in *api.WebHookTrigger, out *WebHookTrigger, s conversion.Scope) error { + return autoConvert_api_WebHookTrigger_To_v1_WebHookTrigger(in, out, s) +} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..52189dfa --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,1014 @@ +// +build !ignore_autogenerated_openshift + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + api_v1 "k8s.io/kubernetes/pkg/api/v1" + conversion "k8s.io/kubernetes/pkg/conversion" + runtime "k8s.io/kubernetes/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BinaryBuildRequestOptions, InType: reflect.TypeOf(&BinaryBuildRequestOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BinaryBuildSource, InType: reflect.TypeOf(&BinaryBuildSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Build, InType: reflect.TypeOf(&Build{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildConfig, InType: reflect.TypeOf(&BuildConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildConfigList, InType: reflect.TypeOf(&BuildConfigList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildConfigSpec, InType: reflect.TypeOf(&BuildConfigSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildConfigStatus, InType: reflect.TypeOf(&BuildConfigStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildList, InType: reflect.TypeOf(&BuildList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildLog, InType: reflect.TypeOf(&BuildLog{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildLogOptions, InType: reflect.TypeOf(&BuildLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildOutput, InType: reflect.TypeOf(&BuildOutput{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildPostCommitSpec, InType: reflect.TypeOf(&BuildPostCommitSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildRequest, InType: reflect.TypeOf(&BuildRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildSource, InType: reflect.TypeOf(&BuildSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildSpec, InType: reflect.TypeOf(&BuildSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildStatus, InType: reflect.TypeOf(&BuildStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildStrategy, InType: reflect.TypeOf(&BuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildTriggerCause, InType: reflect.TypeOf(&BuildTriggerCause{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_BuildTriggerPolicy, InType: reflect.TypeOf(&BuildTriggerPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CommonSpec, InType: reflect.TypeOf(&CommonSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CustomBuildStrategy, InType: reflect.TypeOf(&CustomBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DockerBuildStrategy, InType: reflect.TypeOf(&DockerBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GenericWebHookCause, InType: reflect.TypeOf(&GenericWebHookCause{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GenericWebHookEvent, InType: reflect.TypeOf(&GenericWebHookEvent{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitBuildSource, InType: reflect.TypeOf(&GitBuildSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitHubWebHookCause, InType: reflect.TypeOf(&GitHubWebHookCause{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitInfo, InType: reflect.TypeOf(&GitInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitSourceRevision, InType: reflect.TypeOf(&GitSourceRevision{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageChangeCause, InType: reflect.TypeOf(&ImageChangeCause{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageChangeTrigger, InType: reflect.TypeOf(&ImageChangeTrigger{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageSource, InType: 
reflect.TypeOf(&ImageSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageSourcePath, InType: reflect.TypeOf(&ImageSourcePath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JenkinsPipelineBuildStrategy, InType: reflect.TypeOf(&JenkinsPipelineBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretBuildSource, InType: reflect.TypeOf(&SecretBuildSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretSpec, InType: reflect.TypeOf(&SecretSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SourceBuildStrategy, InType: reflect.TypeOf(&SourceBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SourceControlUser, InType: reflect.TypeOf(&SourceControlUser{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SourceRevision, InType: reflect.TypeOf(&SourceRevision{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WebHookTrigger, InType: reflect.TypeOf(&WebHookTrigger{})}, + ) +} + +func DeepCopy_v1_BinaryBuildRequestOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BinaryBuildRequestOptions) + out := out.(*BinaryBuildRequestOptions) + out.TypeMeta = in.TypeMeta + if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.AsFile = in.AsFile + out.Commit = in.Commit + out.Message = in.Message + out.AuthorName = in.AuthorName + out.AuthorEmail = in.AuthorEmail + out.CommitterName = in.CommitterName + out.CommitterEmail = in.CommitterEmail + return nil + } +} + +func DeepCopy_v1_BinaryBuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BinaryBuildSource) + out := out.(*BinaryBuildSource) + out.AsFile = in.AsFile + return nil + } +} + +func DeepCopy_v1_Build(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Build) + out := out.(*Build) + out.TypeMeta = in.TypeMeta + if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1_BuildSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_BuildStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_BuildConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildConfig) + out := out.(*BuildConfig) + out.TypeMeta = in.TypeMeta + if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if err := DeepCopy_v1_BuildConfigSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + out.Status = in.Status + return nil + } +} + +func DeepCopy_v1_BuildConfigList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildConfigList) + out := out.(*BuildConfigList) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + if err := DeepCopy_v1_BuildConfig(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} + +func DeepCopy_v1_BuildConfigSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildConfigSpec) + out := out.(*BuildConfigSpec) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_v1_BuildTriggerPolicy(&(*in)[i], &(*out)[i], c); err != nil { + 
return err + } + } + } else { + out.Triggers = nil + } + out.RunPolicy = in.RunPolicy + if err := DeepCopy_v1_CommonSpec(&in.CommonSpec, &out.CommonSpec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_BuildConfigStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildConfigStatus) + out := out.(*BuildConfigStatus) + out.LastVersion = in.LastVersion + return nil + } +} + +func DeepCopy_v1_BuildList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildList) + out := out.(*BuildList) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Build(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} + +func DeepCopy_v1_BuildLog(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildLog) + out := out.(*BuildLog) + out.TypeMeta = in.TypeMeta + return nil + } +} + +func DeepCopy_v1_BuildLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildLogOptions) + out := out.(*BuildLogOptions) + out.TypeMeta = in.TypeMeta + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } else { + out.SinceSeconds = nil + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(unversioned.Time) + **out = (*in).DeepCopy() + } else { + out.SinceTime = nil + } + out.Timestamps = in.Timestamps + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } else { + out.TailLines = nil + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } else { + out.LimitBytes = nil + } + out.NoWait = in.NoWait + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } else { + out.Version = nil + } + return nil + } +} + +func DeepCopy_v1_BuildOutput(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildOutput) + out := out.(*BuildOutput) + if in.To != nil { + in, out := &in.To, &out.To + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.To = nil + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.PushSecret = nil + } + return nil + } +} + +func DeepCopy_v1_BuildPostCommitSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildPostCommitSpec) + out := out.(*BuildPostCommitSpec) + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Command = nil + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Args = nil + } + out.Script = in.Script + return nil + } +} + +func DeepCopy_v1_BuildRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildRequest) + out := out.(*BuildRequest) + out.TypeMeta = in.TypeMeta + if err := api_v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = 
new(SourceRevision) + if err := DeepCopy_v1_SourceRevision(*in, *out, c); err != nil { + return err + } + } else { + out.Revision = nil + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.TriggeredByImage = nil + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.From = nil + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } else { + out.Binary = nil + } + if in.LastVersion != nil { + in, out := &in.LastVersion, &out.LastVersion + *out = new(int64) + **out = **in + } else { + out.LastVersion = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Env = nil + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + if err := DeepCopy_v1_BuildTriggerCause(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil + } +} + +func DeepCopy_v1_BuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildSource) + out := out.(*BuildSource) + out.Type = in.Type + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } else { + out.Binary = nil + } + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(string) + **out = **in + } else { + out.Dockerfile = nil + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + if err := DeepCopy_v1_GitBuildSource(*in, *out, c); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ImageSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Images = nil + } + out.ContextDir = in.ContextDir + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.SourceSecret = nil + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } else { + out.Secrets = nil + } + return nil + } +} + +func DeepCopy_v1_BuildSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildSpec) + out := out.(*BuildSpec) + if err := DeepCopy_v1_CommonSpec(&in.CommonSpec, &out.CommonSpec, c); err != nil { + return err + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + if err := DeepCopy_v1_BuildTriggerCause(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.TriggeredBy = nil + } + return nil + } +} + +func DeepCopy_v1_BuildStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildStatus) + out := out.(*BuildStatus) + out.Phase = in.Phase + out.Cancelled = in.Cancelled + out.Reason = in.Reason + out.Message = in.Message + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = 
new(unversioned.Time) + **out = (*in).DeepCopy() + } else { + out.StartTimestamp = nil + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = new(unversioned.Time) + **out = (*in).DeepCopy() + } else { + out.CompletionTimestamp = nil + } + out.Duration = in.Duration + out.OutputDockerImageReference = in.OutputDockerImageReference + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.Config = nil + } + return nil + } +} + +func DeepCopy_v1_BuildStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildStrategy) + out := out.(*BuildStrategy) + out.Type = in.Type + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + if err := DeepCopy_v1_DockerBuildStrategy(*in, *out, c); err != nil { + return err + } + } else { + out.DockerStrategy = nil + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + if err := DeepCopy_v1_SourceBuildStrategy(*in, *out, c); err != nil { + return err + } + } else { + out.SourceStrategy = nil + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + if err := DeepCopy_v1_CustomBuildStrategy(*in, *out, c); err != nil { + return err + } + } else { + out.CustomStrategy = nil + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + **out = **in + } else { + out.JenkinsPipelineStrategy = nil + } + return nil + } +} + +func DeepCopy_v1_BuildTriggerCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildTriggerCause) + out := out.(*BuildTriggerCause) + out.Message = in.Message + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + if err := DeepCopy_v1_GenericWebHookCause(*in, *out, c); err != nil { + return err + } + } else { + out.GenericWebHook = nil + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + if err := DeepCopy_v1_GitHubWebHookCause(*in, *out, c); err != nil { + return err + } + } else { + out.GitHubWebHook = nil + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + if err := DeepCopy_v1_ImageChangeCause(*in, *out, c); err != nil { + return err + } + } else { + out.ImageChangeBuild = nil + } + return nil + } +} + +func DeepCopy_v1_BuildTriggerPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*BuildTriggerPolicy) + out := out.(*BuildTriggerPolicy) + out.Type = in.Type + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + **out = **in + } else { + out.GitHubWebHook = nil + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + **out = **in + } else { + out.GenericWebHook = nil + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + if err := DeepCopy_v1_ImageChangeTrigger(*in, *out, c); err != nil { + return err + } + } else { + out.ImageChange = nil + } + return nil + } +} + +func DeepCopy_v1_CommonSpec(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*CommonSpec) + out := out.(*CommonSpec) + out.ServiceAccount = in.ServiceAccount + if err := DeepCopy_v1_BuildSource(&in.Source, &out.Source, c); err != nil { + return err + } + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := DeepCopy_v1_SourceRevision(*in, *out, c); err != nil { + return err + } + } else { + out.Revision = nil + } + if err := DeepCopy_v1_BuildStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if err := DeepCopy_v1_BuildOutput(&in.Output, &out.Output, c); err != nil { + return err + } + if err := api_v1.DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if err := DeepCopy_v1_BuildPostCommitSpec(&in.PostCommit, &out.PostCommit, c); err != nil { + return err + } + if in.CompletionDeadlineSeconds != nil { + in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds + *out = new(int64) + **out = **in + } else { + out.CompletionDeadlineSeconds = nil + } + return nil + } +} + +func DeepCopy_v1_CustomBuildStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomBuildStrategy) + out := out.(*CustomBuildStrategy) + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.PullSecret = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ExposeDockerSocket = in.ExposeDockerSocket + out.ForcePull = in.ForcePull + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } else { + out.Secrets = nil + } + out.BuildAPIVersion = in.BuildAPIVersion + return nil + } +} + +func DeepCopy_v1_DockerBuildStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DockerBuildStrategy) + out := out.(*DockerBuildStrategy) + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.From = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.PullSecret = nil + } + out.NoCache = in.NoCache + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.ForcePull = in.ForcePull + out.DockerfilePath = in.DockerfilePath + return nil + } +} + +func DeepCopy_v1_GenericWebHookCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GenericWebHookCause) + out := out.(*GenericWebHookCause) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := DeepCopy_v1_SourceRevision(*in, *out, c); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil + } +} + +func DeepCopy_v1_GenericWebHookEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GenericWebHookEvent) + out := out.(*GenericWebHookEvent) + out.Type = in.Type + if in.Git != nil { + in, out := 
&in.Git, &out.Git + *out = new(GitInfo) + if err := DeepCopy_v1_GitInfo(*in, *out, c); err != nil { + return err + } + } else { + out.Git = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Env = nil + } + return nil + } +} + +func DeepCopy_v1_GitBuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitBuildSource) + out := out.(*GitBuildSource) + out.URI = in.URI + out.Ref = in.Ref + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } else { + out.HTTPProxy = nil + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } else { + out.HTTPSProxy = nil + } + return nil + } +} + +func DeepCopy_v1_GitHubWebHookCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitHubWebHookCause) + out := out.(*GitHubWebHookCause) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + if err := DeepCopy_v1_SourceRevision(*in, *out, c); err != nil { + return err + } + } else { + out.Revision = nil + } + out.Secret = in.Secret + return nil + } +} + +func DeepCopy_v1_GitInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitInfo) + out := out.(*GitInfo) + if err := DeepCopy_v1_GitBuildSource(&in.GitBuildSource, &out.GitBuildSource, c); err != nil { + return err + } + out.GitSourceRevision = in.GitSourceRevision + return nil + } +} + +func DeepCopy_v1_GitSourceRevision(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitSourceRevision) + out := out.(*GitSourceRevision) + out.Commit = in.Commit + out.Author = in.Author + out.Committer = in.Committer + out.Message = in.Message + return nil + } +} + +func DeepCopy_v1_ImageChangeCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageChangeCause) + out := out.(*ImageChangeCause) + out.ImageID = in.ImageID + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.FromRef = nil + } + return nil + } +} + +func DeepCopy_v1_ImageChangeTrigger(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageChangeTrigger) + out := out.(*ImageChangeTrigger) + out.LastTriggeredImageID = in.LastTriggeredImageID + if in.From != nil { + in, out := &in.From, &out.From + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.From = nil + } + return nil + } +} + +func DeepCopy_v1_ImageSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageSource) + out := out.(*ImageSource) + out.From = in.From + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } else { + out.Paths = nil + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.PullSecret = nil + } + return nil + } +} + +func DeepCopy_v1_ImageSourcePath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageSourcePath) + out := out.(*ImageSourcePath) + out.SourcePath = in.SourcePath + out.DestinationDir = in.DestinationDir + return nil + } +} + +func 
DeepCopy_v1_JenkinsPipelineBuildStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JenkinsPipelineBuildStrategy) + out := out.(*JenkinsPipelineBuildStrategy) + out.JenkinsfilePath = in.JenkinsfilePath + out.Jenkinsfile = in.Jenkinsfile + return nil + } +} + +func DeepCopy_v1_SecretBuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretBuildSource) + out := out.(*SecretBuildSource) + out.Secret = in.Secret + out.DestinationDir = in.DestinationDir + return nil + } +} + +func DeepCopy_v1_SecretSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretSpec) + out := out.(*SecretSpec) + out.SecretSource = in.SecretSource + out.MountPath = in.MountPath + return nil + } +} + +func DeepCopy_v1_SourceBuildStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SourceBuildStrategy) + out := out.(*SourceBuildStrategy) + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(api_v1.LocalObjectReference) + **out = **in + } else { + out.PullSecret = nil + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Env = nil + } + out.Scripts = in.Scripts + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } else { + out.Incremental = nil + } + out.ForcePull = in.ForcePull + if in.RuntimeImage != nil { + in, out := &in.RuntimeImage, &out.RuntimeImage + *out = new(api_v1.ObjectReference) + **out = **in + } else { + out.RuntimeImage = nil + } + if in.RuntimeArtifacts != nil { + in, out := &in.RuntimeArtifacts, &out.RuntimeArtifacts + *out = make([]ImageSourcePath, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } else { + out.RuntimeArtifacts = nil + } + return nil + } +} + +func DeepCopy_v1_SourceControlUser(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SourceControlUser) + out := out.(*SourceControlUser) + out.Name = in.Name + out.Email = in.Email + return nil + } +} + +func DeepCopy_v1_SourceRevision(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SourceRevision) + out := out.(*SourceRevision) + out.Type = in.Type + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + **out = **in + } else { + out.Git = nil + } + return nil + } +} + +func DeepCopy_v1_WebHookTrigger(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WebHookTrigger) + out := out.(*WebHookTrigger) + out.Secret = in.Secret + out.AllowEnv = in.AllowEnv + return nil + } +} From a2a731f30cdec139375e89235f4e3a9766ee3e26 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 20 Oct 2016 20:49:36 +0530 Subject: [PATCH 03/33] Generate context path relative to project root dir in buildconfig. 
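The context dir is derived by comparing the directory kompose is run from against the repository root reported by git rev-parse --show-toplevel, so running the conversion from a web/ subdirectory of the checkout yields ContextDir "web". A minimal sketch of that computation, with illustrative paths (the real code returns "" on any git or filesystem error):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        rootDir := "/home/user/project"     // what `git rev-parse --show-toplevel` would print, newline trimmed
        workDir := "/home/user/project/web" // os.Getwd() at conversion time
        rel, err := filepath.Rel(rootDir, workDir)
        if err != nil {
            panic(err)
        }
        fmt.Println(rel) // web
    }
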
--- pkg/transformer/openshift/openshift.go | 28 +++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 4a66bfea..943454ee 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -18,7 +18,9 @@ package openshift import ( "fmt" + "os" "os/exec" + "path/filepath" "strings" "github.com/kubernetes-incubator/kompose/pkg/kobject" @@ -84,6 +86,30 @@ func getGitRemote(remote string) string { return string(out) } +// getAbsBuildContext returns build context relative to project root dir +func getAbsBuildContext(context string) string { + out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() + if err != nil { + return "" + } + rootDir := strings.Trim(string(out), "\n") + + var workDir string + workDir, err = os.Getwd() + if err != nil { + return "" + } + + var relPath string + relPath, err = filepath.Rel(rootDir, workDir) + + if err != nil { + return "" + } + + return relPath +} + // initImageStream initialize ImageStream object func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) *imageapi.ImageStream { tag := getImageTag(service.Image) @@ -134,7 +160,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.Build Ref: "master", URI: getGitRemote("origin"), }, - ContextDir: "./", + ContextDir: getAbsBuildContext(service.Build), }, Strategy: buildapi.BuildStrategy{ DockerStrategy: &buildapi.DockerBuildStrategy{ From 6d8a6a3fc8f75ec85cf193a1b2262c874d84e035 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 27 Oct 2016 16:29:44 +0530 Subject: [PATCH 04/33] In buildconfig, only set strategy type as Docker and do not set other details like imagestream to build from, etc. --- pkg/transformer/openshift/openshift.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 943454ee..e40cb5b8 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -153,7 +153,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.Build {Type: "ImageChange"}, }, // RunPolicy - "serial", + "Serial", buildapi.CommonSpec{ Source: buildapi.BuildSource{ Git: &buildapi.GitBuildSource{ @@ -163,12 +163,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.Build ContextDir: getAbsBuildContext(service.Build), }, Strategy: buildapi.BuildStrategy{ - DockerStrategy: &buildapi.DockerBuildStrategy{ - From: &kapi.ObjectReference{ - Kind: "ImageStreamTag", - Name: name + ":from", - }, - }, + DockerStrategy: &buildapi.DockerBuildStrategy{}, }, Output: buildapi.BuildOutput{ To: &kapi.ObjectReference{ From f46274be3870eca14a70f975906ee99593a2b8bc Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 27 Oct 2016 16:30:52 +0530 Subject: [PATCH 05/33] In buildconfig, remove trailing newline from project's remote git repu URL. 
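The remote lookup shells out to git, whose output ends with a newline; without trimming it, the newline is embedded in the BuildConfig source URI. A tiny illustration of the trim, using a made-up URL:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        out := "https://github.com/example/project.git\n" // raw command output, trailing newline included
        url := strings.TrimRight(out, "\n")
        fmt.Printf("%q\n", url) // "https://github.com/example/project.git"
    }
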
--- pkg/transformer/openshift/openshift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index e40cb5b8..799ccfc2 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -83,7 +83,7 @@ func getGitRemote(remote string) string { if err != nil { return "" } - return string(out) + return strings.TrimRight(string(out), "\n") } // getAbsBuildContext returns build context relative to project root dir From efc5203d824315fa041ed5fe104160bbb4a4ad42 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 27 Oct 2016 16:57:29 +0530 Subject: [PATCH 06/33] Fix creating build context dir relative to project root dir. --- pkg/transformer/openshift/openshift.go | 27 ++++++++++++-------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 799ccfc2..b4793730 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -87,27 +87,24 @@ func getGitRemote(remote string) string { } // getAbsBuildContext returns build context relative to project root dir -func getAbsBuildContext(context string) string { - out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() - if err != nil { - return "" - } - rootDir := strings.Trim(string(out), "\n") - - var workDir string - workDir, err = os.Getwd() +func getAbsBuildContext(context string, inputFile string) string { + workDir, err := os.Getwd() if err != nil { return "" } - var relPath string - relPath, err = filepath.Rel(rootDir, workDir) + composeFileDir := filepath.Dir(filepath.Join(workDir, inputFile)) + var out []byte + cmd := exec.Command("git", "rev-parse", "--show-prefix") + cmd.Dir = composeFileDir + out, err = cmd.Output() if err != nil { return "" } - return relPath + prefix := strings.Trim(string(out), "\n") + return filepath.Join(prefix, context) } // initImageStream initialize ImageStream object @@ -137,7 +134,7 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) } // initBuildConfig initialize Openshifts BuildConfig Object -func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.BuildConfig { +func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string) *buildapi.BuildConfig { bc := &buildapi.BuildConfig{ TypeMeta: unversioned.TypeMeta{ Kind: "BuildConfig", @@ -160,7 +157,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig) *buildapi.Build Ref: "master", URI: getGitRemote("origin"), }, - ContextDir: getAbsBuildContext(service.Build), + ContextDir: getAbsBuildContext(service.Build, inputFile), }, Strategy: buildapi.BuildStrategy{ DockerStrategy: &buildapi.DockerBuildStrategy{}, @@ -292,7 +289,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } if opt.CreateBuildConfig && service.Build != "" { - objects = append(objects, initBuildConfig(name, service)) // Openshift BuildConfigs + objects = append(objects, initBuildConfig(name, service, opt.InputFile)) // Openshift BuildConfigs } // If ports not provided in configuration we will not make service From 6156d144f2efeba4df6b2c211de6dbc629cf34d0 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 27 Oct 2016 17:09:14 +0530 Subject: [PATCH 07/33] Fix generating project source repo URL in openshift buildconfig. 
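The fix below normalizes the detected remote so the generated source URI consistently ends in .git. A hedged sketch of that normalization; ensureGitSuffix is an illustrative name, not part of kompose:

package main

import (
    "fmt"
    "strings"
)

// ensureGitSuffix appends ".git" to a remote URL when it is missing,
// mirroring the normalization added in the patch below.
func ensureGitSuffix(url string) string {
    if !strings.HasSuffix(url, ".git") {
        url += ".git"
    }
    return url
}

func main() {
    fmt.Println(ensureGitSuffix("https://github.com/kubernetes-incubator/kompose"))
    // https://github.com/kubernetes-incubator/kompose.git
}
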
--- pkg/transformer/openshift/openshift.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index b4793730..47b3000a 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -83,7 +83,13 @@ func getGitRemote(remote string) string { if err != nil { return "" } - return strings.TrimRight(string(out), "\n") + url := strings.TrimRight(string(out), "\n") + + if !strings.HasSuffix(url, ".git") { + url += ".git" + } + + return url } // getAbsBuildContext returns build context relative to project root dir From 9e2849addf5aaff17ea03b5e598d41a8c0c03b94 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Sat, 29 Oct 2016 15:34:21 +0530 Subject: [PATCH 08/33] In Openshift, don't create imagestream tags for a service with build diretive. --- pkg/transformer/openshift/openshift.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 47b3000a..10ff71ee 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -108,7 +108,6 @@ func getAbsBuildContext(context string, inputFile string) string { if err != nil { return "" } - prefix := strings.Trim(string(out), "\n") return filepath.Join(prefix, context) } @@ -117,6 +116,20 @@ func getAbsBuildContext(context string, inputFile string) string { func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) *imageapi.ImageStream { tag := getImageTag(service.Image) + var tags map[string]imageapi.TagReference + if service.Build != "" { + tags = map[string]imageapi.TagReference{} + } else { + tags = map[string]imageapi.TagReference{ + tag: imageapi.TagReference{ + From: &api.ObjectReference{ + Kind: "DockerImage", + Name: service.Image, + }, + }, + } + } + is := &imageapi.ImageStream{ TypeMeta: unversioned.TypeMeta{ Kind: "ImageStream", @@ -126,14 +139,7 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) Name: name, }, Spec: imageapi.ImageStreamSpec{ - Tags: map[string]imageapi.TagReference{ - tag: imageapi.TagReference{ - From: &api.ObjectReference{ - Kind: "DockerImage", - Name: service.Image, - }, - }, - }, + Tags: tags, }, } return is From a9241e6fe5d1f9f5da96df963c3ff1def8aafb6c Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 3 Nov 2016 15:11:53 +0530 Subject: [PATCH 09/33] Added example for openshift buildconfig. 
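For context before the example files that follow: the preceding patch (08/33) leaves the ImageStream's tag map empty for services that carry a build directive, because the BuildConfig output, not a Docker registry import, will populate the stream. A condensed sketch of that branch using simplified stand-in types (tagSource is not an OpenShift type):

package main

import "fmt"

// tagSource stands in for OpenShift's imageapi.TagReference in this sketch.
type tagSource struct {
    Kind, Name string
}

// imageStreamTags returns an empty tag map for services built from source,
// and a DockerImage-backed tag for services that reference a prebuilt image.
func imageStreamTags(hasBuild bool, tag, image string) map[string]tagSource {
    if hasBuild {
        // The BuildConfig pushes into the stream; nothing to import.
        return map[string]tagSource{}
    }
    return map[string]tagSource{
        tag: {Kind: "DockerImage", Name: image},
    }
}

func main() {
    fmt.Println(imageStreamTags(true, "latest", ""))
    fmt.Println(imageStreamTags(false, "latest", "nginx:latest"))
}
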
--- examples/buildconfig/build/Dockerfile | 5 +++++ examples/buildconfig/docker-compose.yml | 7 +++++++ 2 files changed, 12 insertions(+) create mode 100644 examples/buildconfig/build/Dockerfile create mode 100644 examples/buildconfig/docker-compose.yml diff --git a/examples/buildconfig/build/Dockerfile b/examples/buildconfig/build/Dockerfile new file mode 100644 index 00000000..a191860e --- /dev/null +++ b/examples/buildconfig/build/Dockerfile @@ -0,0 +1,5 @@ +FROM busybox + + +RUN touch /test + diff --git a/examples/buildconfig/docker-compose.yml b/examples/buildconfig/docker-compose.yml new file mode 100644 index 00000000..facaa266 --- /dev/null +++ b/examples/buildconfig/docker-compose.yml @@ -0,0 +1,7 @@ +version: "2" + +services: + foo: + build: "./build" + command: "sleep 3600" + From fa724fe12dad6f0cedff675edc91194a778cd229 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Fri, 4 Nov 2016 00:33:04 +0530 Subject: [PATCH 10/33] Allow specifying repo,branch info for buildconfig over CLI. --- cli/command/command.go | 276 +++++++++++++++++++++++++ cmd/convert.go | 10 +- pkg/kobject/kobject.go | 2 + pkg/transformer/openshift/openshift.go | 13 +- 4 files changed, 296 insertions(+), 5 deletions(-) create mode 100644 cli/command/command.go diff --git a/cli/command/command.go b/cli/command/command.go new file mode 100644 index 00000000..654a0739 --- /dev/null +++ b/cli/command/command.go @@ -0,0 +1,276 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/kubernetes-incubator/kompose/cli/app" + "github.com/urfave/cli" +) + +// Hook for erroring and exit out on warning +type errorOnWarningHook struct{} + +// array consisting of our common conversion flags that will get passed along +// for the autocomplete aspect +var ( + commonConvertFlagsList = []string{"out", "replicas", "yaml", "stdout", "emptyvols"} +) + +func (errorOnWarningHook) Levels() []logrus.Level { + return []logrus.Level{logrus.WarnLevel} +} + +func (errorOnWarningHook) Fire(entry *logrus.Entry) error { + logrus.Fatalln(entry.Message) + return nil +} + +// BeforeApp is an action that is executed before any cli command. +func BeforeApp(c *cli.Context) error { + + if c.GlobalBool("verbose") { + logrus.SetLevel(logrus.DebugLevel) + } else if c.GlobalBool("suppress-warnings") { + logrus.SetLevel(logrus.ErrorLevel) + } else if c.GlobalBool("error-on-warning") { + hook := errorOnWarningHook{} + logrus.AddHook(hook) + } + + // First command added was dummy convert command so removing it + c.App.Commands = c.App.Commands[1:] + provider := strings.ToLower(c.GlobalString("provider")) + switch provider { + case "kubernetes": + c.App.Commands = append(c.App.Commands, ConvertKubernetesCommand()) + case "openshift": + c.App.Commands = append(c.App.Commands, ConvertOpenShiftCommand()) + default: + logrus.Fatalf("Unknown provider. 
Supported providers are kubernetes and openshift.") + } + + return nil +} + +// When user tries out `kompose -h`, the convert option should be visible +// so adding a dummy `convert` command, real convert commands depending on Providers +// mentioned are added in `BeforeApp` function +func ConvertCommandDummy() cli.Command { + command := cli.Command{ + Name: "convert", + Usage: fmt.Sprintf("Convert Docker Compose file (e.g. %s) to Kubernetes/OpenShift objects", app.DefaultComposeFile), + } + return command +} + +// Generate the Bash completion flag taking the common flags plus whatever is +// passed into the function to correspond to the primary command specific args +func generateBashCompletion(args []string) { + commonArgs := []string{"bundle", "file", "suppress-warnings", "verbose", "error-on-warning", "provider"} + flags := append(commonArgs, args...) + + for _, f := range flags { + fmt.Printf("--%s\n", f) + } +} + +// ConvertKubernetesCommand defines the kompose convert subcommand for Kubernetes provider +func ConvertKubernetesCommand() cli.Command { + command := cli.Command{ + Name: "convert", + Usage: fmt.Sprintf("Convert Docker Compose file (e.g. %s) to Kubernetes objects", app.DefaultComposeFile), + Action: func(c *cli.Context) { + app.Convert(c) + }, + BashComplete: func(c *cli.Context) { + flags := []string{"chart", "deployment", "daemonset", "replicationcontroller"} + generateBashCompletion(append(flags, commonConvertFlagsList...)) + }, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "chart,c", + Usage: "Create a Helm chart for converted objects", + }, + cli.BoolFlag{ + Name: "deployment,d", + Usage: "Generate a Kubernetes deployment object (default on)", + }, + cli.BoolFlag{ + Name: "daemonset,ds", + Usage: "Generate a Kubernetes daemonset object", + }, + cli.BoolFlag{ + Name: "replicationcontroller,rc", + Usage: "Generate a Kubernetes replication controller object", + }, + }, + } + command.Flags = append(command.Flags, commonConvertFlags()...) + return command +} + +// ConvertOpenShiftCommand defines the kompose convert subcommand for OpenShift provider +func ConvertOpenShiftCommand() cli.Command { + command := cli.Command{ + Name: "convert", + Usage: fmt.Sprintf("Convert Docker Compose file (e.g. %s) to OpenShift objects", app.DefaultComposeFile), + Action: func(c *cli.Context) { + app.Convert(c) + }, + BashComplete: func(c *cli.Context) { + flags := []string{"deploymentconfig"} + generateBashCompletion(append(flags, commonConvertFlagsList...)) + }, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "deploymentconfig,dc", + Usage: "Generate a OpenShift DeploymentConfig object", + }, + cli.BoolFlag{ + Name: "buildconfig,bc", + Usage: "Generate a BuildConfig for Openshift", + }, + cli.StringFlag{ + Name: "repo", + Value: "", + Usage: "Specify source repository for buildconfig (default remote origin)", + EnvVar: "REPO", + }, + cli.StringFlag{ + Name: "branch", + Value: "master", + Usage: "Specify repository branch to use for buildconfig (default master)", + EnvVar: "BRANCH", + }, + }, + } + command.Flags = append(command.Flags, commonConvertFlags()...) + return command +} + +func commonConvertFlags() []cli.Flag { + return []cli.Flag{ + cli.StringFlag{ + Name: "out,o", + Usage: "Specify path to a file or a directory to save generated objects into. If path is a directory, the objects are stored in that directory. If path is a file, then objects are stored in that single file. 
File is created if it does not exist.", + EnvVar: "OUTPUT_FILE", + }, + cli.IntFlag{ + Name: "replicas", + Value: 1, + Usage: "Specify the number of replicas in the generated resource spec (default 1)", + }, + cli.BoolFlag{ + Name: "yaml, y", + Usage: "Generate resource file in yaml format", + }, + cli.BoolFlag{ + Name: "stdout", + Usage: "Print converted objects to stdout", + }, + cli.BoolFlag{ + Name: "emptyvols", + Usage: "Use Empty Volumes. Don't generate PVCs", + }, + } +} + +// UpCommand defines the kompose up subcommand. +func UpCommand() cli.Command { + return cli.Command{ + Name: "up", + Usage: "Deploy your Dockerized application to Kubernetes (default: creating Kubernetes deployment and service)", + Action: func(c *cli.Context) { + app.Up(c) + }, + BashComplete: func(c *cli.Context) { + flags := []string{"emptyvols"} + generateBashCompletion(flags) + }, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "emptyvols", + Usage: "Use Empty Volumes. Don't generate PVCs", + }, + }, + } +} + +// DownCommand defines the kompose down subcommand. +func DownCommand() cli.Command { + return cli.Command{ + Name: "down", + Usage: "Delete instantiated services/deployments from kubernetes", + Action: func(c *cli.Context) { + app.Down(c) + }, + BashComplete: func(c *cli.Context) { + flags := []string{"emptyvols"} + generateBashCompletion(flags) + }, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "emptyvols", + Usage: "Use Empty Volumes. Don't generate PVCs", + }, + }, + } +} + +// CommonFlags defines the flags that are in common for all subcommands. +func CommonFlags() []cli.Flag { + return []cli.Flag{ + cli.StringFlag{ + Name: "bundle,dab", + Usage: "Specify a Distributed Application Bundle (DAB) file", + EnvVar: "DAB_FILE", + }, + + cli.StringFlag{ + Name: "file,f", + Usage: fmt.Sprintf("Specify an alternative compose file (default: %s)", app.DefaultComposeFile), + Value: app.DefaultComposeFile, + EnvVar: "COMPOSE_FILE", + }, + // creating a flag to suppress warnings + cli.BoolFlag{ + Name: "suppress-warnings", + Usage: "Suppress all warnings", + }, + // creating a flag to show all kinds of warnings + cli.BoolFlag{ + Name: "verbose", + Usage: "Show all type of logs", + }, + // flag to treat any warning as error + cli.BoolFlag{ + Name: "error-on-warning", + Usage: "Treat any warning as error", + }, + // mention the end provider + cli.StringFlag{ + Name: "provider", + Usage: "Generate artifacts for this provider", + Value: app.DefaultProvider, + EnvVar: "PROVIDER", + }, + } +} diff --git a/cmd/convert.go b/cmd/convert.go index f7c23d63..a3539f42 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -26,7 +26,7 @@ import ( ) var ( - ConvertSource, ConvertOut string + ConvertSource, ConvertOut, ConvertRepo, ConvertBranch string ConvertChart, ConvertDeployment, ConvertDaemonSet bool ConvertReplicationController, ConvertYaml, ConvertStdout bool ConvertEmptyVols, ConvertDeploymentConfig, ConvertBuildConfig bool @@ -54,6 +54,8 @@ var convertCmd = &cobra.Command{ CreateDS: ConvertDaemonSet, CreateRC: ConvertReplicationController, CreateBuildConfig: ConvertBuildConfig, + Repo: ConvertRepo, + Branch: ConvertBranch, CreateDeploymentConfig: ConvertDeploymentConfig, EmptyVols: ConvertEmptyVols, } @@ -87,6 +89,10 @@ func init() { convertCmd.Flags().MarkHidden("deployment-config") convertCmd.Flags().BoolVar(&ConvertBuildConfig, "build-config", false, "Generate an OpenShift buildconfig object") convertCmd.Flags().MarkHidden("build-config") + convertCmd.Flags().StringVar(&ConvertRepo, "repo", "", "Specify source 
repository for buildconfig (default remote origin)") + convertCmd.Flags().MarkHidden("repo") + convertCmd.Flags().StringVar(&ConvertBranch, "branch", "master", "Specify repository branch to use for buildconfig (default master)") + convertCmd.Flags().MarkHidden("branch") // Standard between the two convertCmd.Flags().BoolVarP(&ConvertYaml, "yaml", "y", false, "Generate resource files into yaml format") @@ -110,12 +116,14 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} Resource Flags: + --branch Specify repository branch to use for buildconfig (default master) --build-config Generate an Openshift build config object -c, --chart Create a Helm chart for converted objects --daemon-set Generate a Kubernetes daemonset object -d, --deployment Generate a Kubernetes deployment object --deployment-config Generate an OpenShift deployment config object --replication-controller Generate a Kubernetes replication controller object + --repo Specify source repository for buildconfig (default remote origin) Flags: {{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} diff --git a/pkg/kobject/kobject.go b/pkg/kobject/kobject.go index e5715e37..af2eb480 100644 --- a/pkg/kobject/kobject.go +++ b/pkg/kobject/kobject.go @@ -34,6 +34,8 @@ type ConvertOptions struct { CreateDS bool CreateDeploymentConfig bool CreateBuildConfig bool + Repo string + Branch string CreateChart bool GenerateYaml bool EmptyVols bool diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 10ff71ee..47df58c7 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -146,7 +146,12 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) } // initBuildConfig initialize Openshifts BuildConfig Object -func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string) *buildapi.BuildConfig { +func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string, repo string, branch string) *buildapi.BuildConfig { + uri := repo + if uri == "" { + uri = getGitRemote("origin") + } + bc := &buildapi.BuildConfig{ TypeMeta: unversioned.TypeMeta{ Kind: "BuildConfig", @@ -166,8 +171,8 @@ func initBuildConfig(name string, service kobject.ServiceConfig, inputFile strin buildapi.CommonSpec{ Source: buildapi.BuildSource{ Git: &buildapi.GitBuildSource{ - Ref: "master", - URI: getGitRemote("origin"), + Ref: branch, + URI: uri, }, ContextDir: getAbsBuildContext(service.Build, inputFile), }, @@ -301,7 +306,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } if opt.CreateBuildConfig && service.Build != "" { - objects = append(objects, initBuildConfig(name, service, opt.InputFile)) // Openshift BuildConfigs + objects = append(objects, initBuildConfig(name, service, opt.InputFile, opt.Repo, opt.Branch)) // Openshift BuildConfigs } // If ports not provided in configuration we will not make service From 6f3e670af17aee6eb800450ca498fc86ed8c1cd8 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 15 Nov 2016 14:15:04 +0530 Subject: [PATCH 11/33] Refactor openshift buildconfig CLI options. - Remove --buildconfig, -bc option. 
Create buildconfig if service has build info - Rename --branch to --build-branch - Rename --repo to --build-repo --- cli/command/command.go | 12 ++++------ cmd/convert.go | 32 +++++++++++--------------- pkg/app/app.go | 10 +++++--- pkg/kobject/kobject.go | 5 ++-- pkg/transformer/openshift/openshift.go | 4 ++-- 5 files changed, 29 insertions(+), 34 deletions(-) diff --git a/cli/command/command.go b/cli/command/command.go index 654a0739..db4c917e 100644 --- a/cli/command/command.go +++ b/cli/command/command.go @@ -144,21 +144,17 @@ func ConvertOpenShiftCommand() cli.Command { Name: "deploymentconfig,dc", Usage: "Generate a OpenShift DeploymentConfig object", }, - cli.BoolFlag{ - Name: "buildconfig,bc", - Usage: "Generate a BuildConfig for Openshift", - }, cli.StringFlag{ - Name: "repo", + Name: "build-repo", Value: "", Usage: "Specify source repository for buildconfig (default remote origin)", - EnvVar: "REPO", + EnvVar: "BUILDREPO", }, cli.StringFlag{ - Name: "branch", + Name: "build-branch", Value: "master", Usage: "Specify repository branch to use for buildconfig (default master)", - EnvVar: "BRANCH", + EnvVar: "BUILDBRANCH", }, }, } diff --git a/cmd/convert.go b/cmd/convert.go index a3539f42..4ca69efd 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -26,12 +26,12 @@ import ( ) var ( - ConvertSource, ConvertOut, ConvertRepo, ConvertBranch string - ConvertChart, ConvertDeployment, ConvertDaemonSet bool - ConvertReplicationController, ConvertYaml, ConvertStdout bool - ConvertEmptyVols, ConvertDeploymentConfig, ConvertBuildConfig bool - ConvertReplicas int - ConvertOpt kobject.ConvertOptions + ConvertSource, ConvertOut, ConvertBuildRepo, ConvertBuildBranch string + ConvertChart, ConvertDeployment, ConvertDaemonSet bool + ConvertReplicationController, ConvertYaml, ConvertStdout bool + ConvertEmptyVols, ConvertDeploymentConfig, ConvertBuildConfig bool + ConvertReplicas int + ConvertOpt kobject.ConvertOptions ) var ConvertProvider string = GlobalProvider @@ -53,9 +53,8 @@ var convertCmd = &cobra.Command{ CreateD: ConvertDeployment, CreateDS: ConvertDaemonSet, CreateRC: ConvertReplicationController, - CreateBuildConfig: ConvertBuildConfig, - Repo: ConvertRepo, - Branch: ConvertBranch, + BuildRepo: ConvertBuildRepo, + BuildBranch: ConvertBuildBranch, CreateDeploymentConfig: ConvertDeploymentConfig, EmptyVols: ConvertEmptyVols, } @@ -87,12 +86,10 @@ func init() { // OpenShift only convertCmd.Flags().BoolVar(&ConvertDeploymentConfig, "deployment-config", true, "Generate an OpenShift deploymentconfig object") convertCmd.Flags().MarkHidden("deployment-config") - convertCmd.Flags().BoolVar(&ConvertBuildConfig, "build-config", false, "Generate an OpenShift buildconfig object") - convertCmd.Flags().MarkHidden("build-config") - convertCmd.Flags().StringVar(&ConvertRepo, "repo", "", "Specify source repository for buildconfig (default remote origin)") - convertCmd.Flags().MarkHidden("repo") - convertCmd.Flags().StringVar(&ConvertBranch, "branch", "master", "Specify repository branch to use for buildconfig (default master)") - convertCmd.Flags().MarkHidden("branch") + convertCmd.Flags().StringVar(&ConvertBuildRepo, "build-repo", "", "Specify source repository for buildconfig (default remote origin)") + convertCmd.Flags().MarkHidden("build-repo") + convertCmd.Flags().StringVar(&ConvertBuildBranch, "build-branch", "master", "Specify repository branch to use for buildconfig (default master)") + convertCmd.Flags().MarkHidden("build-branch") // Standard between the two 
convertCmd.Flags().BoolVarP(&ConvertYaml, "yaml", "y", false, "Generate resource files into yaml format") @@ -116,14 +113,13 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} Resource Flags: - --branch Specify repository branch to use for buildconfig (default master) - --build-config Generate an Openshift build config object + --build-branch Specify repository branch to use for buildconfig (default master) + --build-repo Specify source repository for buildconfig (default remote origin) -c, --chart Create a Helm chart for converted objects --daemon-set Generate a Kubernetes daemonset object -d, --deployment Generate a Kubernetes deployment object --deployment-config Generate an OpenShift deployment config object --replication-controller Generate a Kubernetes replication controller object - --repo Specify source repository for buildconfig (default remote origin) Flags: {{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} diff --git a/pkg/app/app.go b/pkg/app/app.go index fd76d9e6..0f511634 100644 --- a/pkg/app/app.go +++ b/pkg/app/app.go @@ -63,7 +63,8 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec // OpenShift specific flags deploymentConfig := cmd.Flags().Lookup("deployment-config").Changed - buildConfig := cmd.Flags().Lookup("build-config").Changed + buildRepo := cmd.Flags().Lookup("build-repo").Changed + buildBranch := cmd.Flags().Lookup("build-branch").Changed // Kubernetes specific flags chart := cmd.Flags().Lookup("chart").Changed @@ -90,8 +91,11 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec if deploymentConfig { logrus.Fatalf("--deployment-config is an OpenShift only flag") } - if buildConfig { - logrus.Fatalf("--build-config is an Openshift only flag") + if buildRepo { + logrus.Fatalf("--build-repo is an Openshift only flag") + } + if buildBranch { + logrus.Fatalf("--build-branch is an Openshift only flag") } } diff --git a/pkg/kobject/kobject.go b/pkg/kobject/kobject.go index af2eb480..cfe907b0 100644 --- a/pkg/kobject/kobject.go +++ b/pkg/kobject/kobject.go @@ -33,9 +33,8 @@ type ConvertOptions struct { CreateRC bool CreateDS bool CreateDeploymentConfig bool - CreateBuildConfig bool - Repo string - Branch string + BuildRepo string + BuildBranch string CreateChart bool GenerateYaml bool EmptyVols bool diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 47df58c7..31495595 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -305,8 +305,8 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C objects = append(objects, o.initImageStream(name, service)) } - if opt.CreateBuildConfig && service.Build != "" { - objects = append(objects, initBuildConfig(name, service, opt.InputFile, opt.Repo, opt.Branch)) // Openshift BuildConfigs + if service.Build != "" { + objects = append(objects, initBuildConfig(name, service, opt.InputFile, opt.BuildRepo, opt.BuildBranch)) // Openshift BuildConfigs } // If ports not provided in configuration we will not make service From f5566d4097949b178e0d5e752ed18c368e2f1819 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 15 Nov 2016 13:17:30 +0530 Subject: [PATCH 12/33] Improve error handling and logging for openshift buildconfig creation. 
- Error out on failure to create buildconfig, with meaningful error messages, e.g., git missing, error in fetching git remote, error in creating build context, etc. - Add info log for build repo/branch being used. --- pkg/transformer/openshift/openshift.go | 41 ++++++++++++++++++++------ 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 31495595..e49f1f39 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -77,11 +77,17 @@ func getImageTag(image string) string { } } +// hasGitBinary checks if the 'git' binary is available on the system +func hasGitBinary() bool { + _, err := exec.LookPath("git") + return err == nil +} + // getGitRemote gets git remote URI for the current git repo -func getGitRemote(remote string) string { +func getGitRemote(remote string) (string, error) { out, err := exec.Command("git", "remote", "get-url", remote).Output() if err != nil { - return "" + return "", err } url := strings.TrimRight(string(out), "\n") @@ -89,14 +95,14 @@ func getGitRemote(remote string) string { url += ".git" } - return url + return url, nil } // getAbsBuildContext returns build context relative to project root dir -func getAbsBuildContext(context string, inputFile string) string { +func getAbsBuildContext(context string, inputFile string) (string, error) { workDir, err := os.Getwd() if err != nil { - return "" + return "", err } composeFileDir := filepath.Dir(filepath.Join(workDir, inputFile)) @@ -106,10 +112,10 @@ func getAbsBuildContext(context string, inputFile string) string { cmd.Dir = composeFileDir out, err = cmd.Output() if err != nil { - return "" + return "", err } prefix := strings.Trim(string(out), "\n") - return filepath.Join(prefix, context) + return filepath.Join(prefix, context), nil } // initImageStream initialize ImageStream object @@ -147,11 +153,28 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) // initBuildConfig initialize Openshifts BuildConfig Object func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string, repo string, branch string) *buildapi.BuildConfig { + var err error + uri := repo if uri == "" { - uri = getGitRemote("origin") + if hasGitBinary() { + uri, err = getGitRemote("origin") + if err != nil { + logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") + } + } else { + logrus.Fatalf("Git is not installed! 
Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") + } } + var contextDir string + contextDir, err = getAbsBuildContext(service.Build, inputFile) + if err != nil { + logrus.Fatalf("[%s] Buildconfig cannote be created due to error in creating build context.", name) + } + + logrus.Infof("[%s] Buildconfig using repo: %s, branch: %s as source.", name, uri, branch) + bc := &buildapi.BuildConfig{ TypeMeta: unversioned.TypeMeta{ Kind: "BuildConfig", @@ -174,7 +197,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig, inputFile strin Ref: branch, URI: uri, }, - ContextDir: getAbsBuildContext(service.Build, inputFile), + ContextDir: contextDir, }, Strategy: buildapi.BuildStrategy{ DockerStrategy: &buildapi.DockerBuildStrategy{}, From 1a7b19c0706b6d8bc34f2ffd0a486d96071e2f67 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 15 Nov 2016 14:57:24 +0530 Subject: [PATCH 13/33] Resolve/print buildconfig source repo info globally for compose project. --- pkg/transformer/openshift/openshift.go | 43 ++++++++++++++------------ 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index e49f1f39..097e1c86 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -153,28 +153,11 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) // initBuildConfig initialize Openshifts BuildConfig Object func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string, repo string, branch string) *buildapi.BuildConfig { - var err error - - uri := repo - if uri == "" { - if hasGitBinary() { - uri, err = getGitRemote("origin") - if err != nil { - logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") - } - } else { - logrus.Fatalf("Git is not installed! 
Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") - } - } - - var contextDir string - contextDir, err = getAbsBuildContext(service.Build, inputFile) + contextDir, err := getAbsBuildContext(service.Build, inputFile) if err != nil { logrus.Fatalf("[%s] Buildconfig cannote be created due to error in creating build context.", name) } - logrus.Infof("[%s] Buildconfig using repo: %s, branch: %s as source.", name, uri, branch) - bc := &buildapi.BuildConfig{ TypeMeta: unversioned.TypeMeta{ Kind: "BuildConfig", @@ -195,7 +178,7 @@ func initBuildConfig(name string, service kobject.ServiceConfig, inputFile strin Source: buildapi.BuildSource{ Git: &buildapi.GitBuildSource{ Ref: branch, - URI: uri, + URI: repo, }, ContextDir: contextDir, }, @@ -311,6 +294,9 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } // this will hold all the converted data var allobjects []runtime.Object + var err error + hasBuild := false + buildRepo := "" for name, service := range komposeObject.ServiceConfigs { var objects []runtime.Object @@ -329,7 +315,20 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } if service.Build != "" { - objects = append(objects, initBuildConfig(name, service, opt.InputFile, opt.BuildRepo, opt.BuildBranch)) // Openshift BuildConfigs + if !hasBuild { + if opt.BuildRepo == "" { + if hasGitBinary() { + buildRepo, err = getGitRemote("origin") + if err != nil { + logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") + } + } else { + logrus.Fatalf("Git is not installed! Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") + } + } + hasBuild = true + } + objects = append(objects, initBuildConfig(name, service, opt.InputFile, buildRepo, opt.BuildBranch)) // Openshift BuildConfigs } // If ports not provided in configuration we will not make service @@ -346,6 +345,10 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C allobjects = append(allobjects, objects...) } + + if hasBuild { + logrus.Infof("Buildconfig using %s::%s as source.", buildRepo, opt.BuildBranch) + } // If docker-compose has a volumes_from directive it will be handled here o.VolumesFrom(&allobjects, komposeObject) // sort all object so Services are first From bea50b1608f64eaec023fde2b9d47dd500ac1e5a Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 16 Nov 2016 18:26:42 +0530 Subject: [PATCH 14/33] Fix tests for buildconfig. 
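The regenerated fixture below contains contextDir values such as script/test/fixtures/ngnix-node-redis/node: the prefix reported by git rev-parse --show-prefix from the compose file's directory inside the kompose repository, joined with the service's build path (presumably ./node). A small worked illustration of that join; both input values are assumptions taken from the fixture, not computed here:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Prefix of the compose file's directory within the kompose repo
    // (illustrative value matching the fixture).
    prefix := "script/test/fixtures/ngnix-node-redis/"

    // The service's build directive, relative to the compose file (assumed).
    context := "./node"

    // filepath.Join also cleans the "./", giving the fixture's contextDir.
    fmt.Println(filepath.Join(prefix, context))
    // script/test/fixtures/ngnix-node-redis/node
}
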
--- .../fixtures/ngnix-node-redis/output-os.json | 388 ++++++++++++------ 1 file changed, 254 insertions(+), 134 deletions(-) diff --git a/script/test/fixtures/ngnix-node-redis/output-os.json b/script/test/fixtures/ngnix-node-redis/output-os.json index 0e93b3a6..c6338945 100644 --- a/script/test/fixtures/ngnix-node-redis/output-os.json +++ b/script/test/fixtures/ngnix-node-redis/output-os.json @@ -3,33 +3,6 @@ "apiVersion": "v1", "metadata": {}, "items": [ - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "node2", - "creationTimestamp": null, - "labels": { - "service": "node2" - } - }, - "spec": { - "ports": [ - { - "name": "8080", - "protocol": "TCP", - "port": 8080, - "targetPort": 8080 - } - ], - "selector": { - "service": "node2" - } - }, - "status": { - "loadBalancer": {} - } - }, { "kind": "Service", "apiVersion": "v1", @@ -139,7 +112,7 @@ } }, { - "kind": "DeploymentConfig", + "kind": "Service", "apiVersion": "v1", "metadata": { "name": "node2", @@ -149,81 +122,20 @@ } }, "spec": { - "strategy": { - "resources": {} - }, - "triggers": [ + "ports": [ { - "type": "ConfigChange" - }, - { - "type": "ImageChange", - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "node2" - ], - "from": { - "kind": "ImageStreamTag", - "name": "node2:latest" - } - } + "name": "8080", + "protocol": "TCP", + "port": 8080, + "targetPort": 8080 } ], - "replicas": 1, - "test": false, "selector": { "service": "node2" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "service": "node2" - } - }, - "spec": { - "containers": [ - { - "name": "node2", - "image": " ", - "ports": [ - { - "containerPort": 8080, - "protocol": "TCP" - } - ], - "resources": {} - } - ], - "restartPolicy": "Always" - } } }, - "status": {} - }, - { - "kind": "ImageStream", - "apiVersion": "v1", - "metadata": { - "name": "node2", - "creationTimestamp": null - }, - "spec": { - "tags": [ - { - "name": "latest", - "annotations": null, - "from": { - "kind": "DockerImage" - }, - "generation": null, - "importPolicy": {} - } - ] - }, "status": { - "dockerImageRepository": "" + "loadBalancer": {} } }, { @@ -297,23 +209,53 @@ "name": "node3", "creationTimestamp": null }, - "spec": { - "tags": [ - { - "name": "latest", - "annotations": null, - "from": { - "kind": "DockerImage" - }, - "generation": null, - "importPolicy": {} - } - ] - }, + "spec": {}, "status": { "dockerImageRepository": "" } }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "node3", + "creationTimestamp": null + }, + "spec": { + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange" + } + ], + "runPolicy": "Serial", + "source": { + "type": "Git", + "git": { + "uri": "https://github.com/kubernetes-incubator/kompose.git", + "ref": "master" + }, + "contextDir": "script/test/fixtures/ngnix-node-redis/node" + }, + "strategy": { + "type": "Docker", + "dockerStrategy": {} + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "node3:latest" + } + }, + "resources": {}, + "postCommit": {} + }, + "status": { + "lastVersion": 0 + } + }, { "kind": "DeploymentConfig", "apiVersion": "v1", @@ -474,23 +416,53 @@ "name": "nginx", "creationTimestamp": null }, - "spec": { - "tags": [ - { - "name": "latest", - "annotations": null, - "from": { - "kind": "DockerImage" - }, - "generation": null, - "importPolicy": {} - } - ] - }, + "spec": {}, "status": { "dockerImageRepository": "" } }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "nginx", + 
"creationTimestamp": null + }, + "spec": { + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange" + } + ], + "runPolicy": "Serial", + "source": { + "type": "Git", + "git": { + "uri": "https://github.com/kubernetes-incubator/kompose.git", + "ref": "master" + }, + "contextDir": "script/test/fixtures/ngnix-node-redis/nginx" + }, + "strategy": { + "type": "Docker", + "dockerStrategy": {} + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "nginx:latest" + } + }, + "resources": {}, + "postCommit": {} + }, + "status": { + "lastVersion": 0 + } + }, { "kind": "DeploymentConfig", "apiVersion": "v1", @@ -562,22 +534,170 @@ "name": "node1", "creationTimestamp": null }, - "spec": { - "tags": [ - { - "name": "latest", - "annotations": null, - "from": { - "kind": "DockerImage" - }, - "generation": null, - "importPolicy": {} - } - ] - }, + "spec": {}, "status": { "dockerImageRepository": "" } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "node1", + "creationTimestamp": null + }, + "spec": { + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange" + } + ], + "runPolicy": "Serial", + "source": { + "type": "Git", + "git": { + "uri": "https://github.com/kubernetes-incubator/kompose.git", + "ref": "master" + }, + "contextDir": "script/test/fixtures/ngnix-node-redis/node" + }, + "strategy": { + "type": "Docker", + "dockerStrategy": {} + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "node1:latest" + } + }, + "resources": {}, + "postCommit": {} + }, + "status": { + "lastVersion": 0 + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "node2", + "creationTimestamp": null, + "labels": { + "service": "node2" + } + }, + "spec": { + "strategy": { + "resources": {} + }, + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "node2" + ], + "from": { + "kind": "ImageStreamTag", + "name": "node2:latest" + } + } + } + ], + "replicas": 1, + "test": false, + "selector": { + "service": "node2" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "service": "node2" + } + }, + "spec": { + "containers": [ + { + "name": "node2", + "image": " ", + "ports": [ + { + "containerPort": 8080, + "protocol": "TCP" + } + ], + "resources": {} + } + ], + "restartPolicy": "Always" + } + } + }, + "status": {} + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "node2", + "creationTimestamp": null + }, + "spec": {}, + "status": { + "dockerImageRepository": "" + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "node2", + "creationTimestamp": null + }, + "spec": { + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange" + } + ], + "runPolicy": "Serial", + "source": { + "type": "Git", + "git": { + "uri": "https://github.com/kubernetes-incubator/kompose.git", + "ref": "master" + }, + "contextDir": "script/test/fixtures/ngnix-node-redis/node" + }, + "strategy": { + "type": "Docker", + "dockerStrategy": {} + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "node2:latest" + } + }, + "resources": {}, + "postCommit": {} + }, + "status": { + "lastVersion": 0 + } } ] } From e83b454536a1d65e83b8ce45b9fa0eba84a1369b Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 16 Nov 2016 21:39:42 +0530 Subject: [PATCH 15/33] Bugfix: Accept abs path for compose file for buildconfig. 
Change directory to compose file dir when resolving git remote. --- pkg/transformer/openshift/openshift.go | 42 ++++++++++++++++---------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 097e1c86..38114e77 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -84,8 +84,10 @@ func hasGitBinary() bool { } // getGitRemote gets git remote URI for the current git repo -func getGitRemote(remote string) (string, error) { - out, err := exec.Command("git", "remote", "get-url", remote).Output() +func getGitRemote(composeFileDir string, remote string) (string, error) { + cmd := exec.Command("git", "remote", "get-url", remote) + cmd.Dir = composeFileDir + out, err := cmd.Output() if err != nil { return "", err } @@ -98,19 +100,23 @@ func getGitRemote(remote string) (string, error) { return url, nil } -// getAbsBuildContext returns build context relative to project root dir -func getAbsBuildContext(context string, inputFile string) (string, error) { - workDir, err := os.Getwd() - if err != nil { - return "", err +// getComposeFileDir returns compose file directory +func getComposeFileDir(inputFile string) (string, error) { + if strings.Index(inputFile, "/") != 0 { + workDir, err := os.Getwd() + if err != nil { + return "", err + } + inputFile = filepath.Join(workDir, inputFile) } + return filepath.Dir(inputFile), nil +} - composeFileDir := filepath.Dir(filepath.Join(workDir, inputFile)) - - var out []byte +// getAbsBuildContext returns build context relative to project root dir +func getAbsBuildContext(context string, composeFileDir string) (string, error) { cmd := exec.Command("git", "rev-parse", "--show-prefix") cmd.Dir = composeFileDir - out, err = cmd.Output() + out, err := cmd.Output() if err != nil { return "", err } @@ -152,8 +158,8 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) } // initBuildConfig initialize Openshifts BuildConfig Object -func initBuildConfig(name string, service kobject.ServiceConfig, inputFile string, repo string, branch string) *buildapi.BuildConfig { - contextDir, err := getAbsBuildContext(service.Build, inputFile) +func initBuildConfig(name string, service kobject.ServiceConfig, composeFileDir string, repo string, branch string) *buildapi.BuildConfig { + contextDir, err := getAbsBuildContext(service.Build, composeFileDir) if err != nil { logrus.Fatalf("[%s] Buildconfig cannote be created due to error in creating build context.", name) } @@ -295,6 +301,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C // this will hold all the converted data var allobjects []runtime.Object var err error + var composeFileDir string hasBuild := false buildRepo := "" @@ -316,14 +323,17 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C if service.Build != "" { if !hasBuild { + composeFileDir, err = getComposeFileDir(opt.InputFile) + if err != nil { + logrus.Warningf("Error in detecting compose file's directory.") + continue + } if opt.BuildRepo == "" { if hasGitBinary() { - buildRepo, err = getGitRemote("origin") + buildRepo, err = getGitRemote(composeFileDir, "origin") if err != nil { logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") } - } else { - logrus.Fatalf("Git is not installed! 
Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") } } hasBuild = true From eb719f85c2f514c92642ff9ec147f22680a81d77 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 21 Nov 2016 20:41:23 +0530 Subject: [PATCH 16/33] Fix tests for buildconfig support. --- script/test/cmd/tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/test/cmd/tests.sh b/script/test/cmd/tests.sh index ea9219a3..50d4505f 100755 --- a/script/test/cmd/tests.sh +++ b/script/test/cmd/tests.sh @@ -46,7 +46,7 @@ unset $(cat $KOMPOSE_ROOT/script/test/fixtures/gitlab/envs | cut -d'=' -f1) # kubernetes test convert::expect_success_and_warning "kompose -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-k8s.json" "Kubernetes provider doesn't support build key - ignoring" # openshift test -convert::expect_success "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-os.json" +convert::expect_success_warning "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-os.json" "Buildconfig using https://github.com/kubernetes-incubator/kompose.git::master as source." ###### # Tests related to docker-compose file in /script/test/fixtures/entrypoint-command From ba9995c17baabcbef0af9e1396a846682cf24ec2 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 21 Nov 2016 20:46:47 +0530 Subject: [PATCH 17/33] Update cmd to get git remote url for backwards compatibility with older git-2.6.x --- pkg/transformer/openshift/openshift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 38114e77..df815f0c 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -85,7 +85,7 @@ func hasGitBinary() bool { // getGitRemote gets git remote URI for the current git repo func getGitRemote(composeFileDir string, remote string) (string, error) { - cmd := exec.Command("git", "remote", "get-url", remote) + cmd := exec.Command("git", "ls-remote", "--get-url", remote) cmd.Dir = composeFileDir out, err := cmd.Output() if err != nil { From d8ec3db6736dbb4292b09d951654ca9a736cb9b9 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 24 Nov 2016 16:35:18 +0530 Subject: [PATCH 18/33] Bugfix in specifying custom build repo for buildconfig. --- pkg/transformer/openshift/openshift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index df815f0c..64603d27 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -303,7 +303,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C var err error var composeFileDir string hasBuild := false - buildRepo := "" + buildRepo := opt.BuildRepo for name, service := range komposeObject.ServiceConfigs { var objects []runtime.Object From 3b13dae5153d2fe7a795bc98f8c5bf3f4fa1b47a Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 28 Nov 2016 13:23:20 +0530 Subject: [PATCH 19/33] Fix govet warning for openshift buildconfig. 
Go vet warning was 'composite liternal uses unkeyed fields'. --- pkg/transformer/openshift/openshift.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 64603d27..a5baf1b0 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -173,14 +173,12 @@ func initBuildConfig(name string, service kobject.ServiceConfig, composeFileDir Name: name, }, Spec: buildapi.BuildConfigSpec{ - // Triggers - []buildapi.BuildTriggerPolicy{ + Triggers: []buildapi.BuildTriggerPolicy{ {Type: "ConfigChange"}, {Type: "ImageChange"}, }, - // RunPolicy - "Serial", - buildapi.CommonSpec{ + RunPolicy: "Serial", + CommonSpec: buildapi.CommonSpec{ Source: buildapi.BuildSource{ Git: &buildapi.GitBuildSource{ Ref: branch, From 875c7d95dc236a5b2d1eb5bb88cc6102f9c3f891 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 29 Nov 2016 14:38:38 +0530 Subject: [PATCH 20/33] Include buildconfigs in kompose deployment. --- pkg/transformer/openshift/openshift.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index a5baf1b0..b4e4599d 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -404,6 +404,12 @@ func (o *OpenShift) Deploy(komposeObject kobject.KomposeObject, opt kobject.Conv return err } logrus.Infof("Successfully created ImageStream: %s", t.Name) + case *buildapi.BuildConfig: + _, err := oclient.BuildConfigs(namespace).Create(t) + if err != nil { + return err + } + logrus.Infof("Successfully created BuildConfig: %s", t.Name) case *deployapi.DeploymentConfig: _, err := oclient.DeploymentConfigs(namespace).Create(t) if err != nil { From a1797a8d076c4193bfdec05bd26c8013d946f2e7 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 1 Dec 2016 16:52:31 +0530 Subject: [PATCH 21/33] In buildconfig, detect current branch and it's remote url as defaults. 
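Patch 21 replaces the hard-coded origin/master defaults with whatever the repository is actually on. A standalone sketch of the branch-detection half, assuming git is on PATH; currentBranch is an illustrative name, not a kompose function:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// currentBranch returns the short name of the branch checked out in dir,
// e.g. "master" or "feature/buildconfig".
func currentBranch(dir string) (string, error) {
    cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
    cmd.Dir = dir
    out, err := cmd.Output()
    if err != nil {
        return "", err
    }
    return strings.TrimRight(string(out), "\n"), nil
}

func main() {
    branch, err := currentBranch(".")
    if err != nil {
        fmt.Println("not a git repository:", err)
        return
    }
    fmt.Println("defaulting --build-branch to:", branch)
}
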
--- cli/command/command.go | 2 +- cmd/convert.go | 4 +-- pkg/transformer/openshift/openshift.go | 46 +++++++++++++++++++++++--- 3 files changed, 44 insertions(+), 8 deletions(-) diff --git a/cli/command/command.go b/cli/command/command.go index db4c917e..f2b8ba93 100644 --- a/cli/command/command.go +++ b/cli/command/command.go @@ -152,7 +152,7 @@ func ConvertOpenShiftCommand() cli.Command { }, cli.StringFlag{ Name: "build-branch", - Value: "master", + Value: "", Usage: "Specify repository branch to use for buildconfig (default master)", EnvVar: "BUILDBRANCH", }, diff --git a/cmd/convert.go b/cmd/convert.go index 4ca69efd..7d59f091 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -113,8 +113,8 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} Resource Flags: - --build-branch Specify repository branch to use for buildconfig (default master) - --build-repo Specify source repository for buildconfig (default remote origin) + --build-branch Specify repository branch to use for buildconfig (default is current branch name) + --build-repo Specify source repository for buildconfig (default is current branch's remote url -c, --chart Create a Helm chart for converted objects --daemon-set Generate a Kubernetes daemonset object -d, --deployment Generate a Kubernetes deployment object diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index b4e4599d..dbdd9949 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -100,6 +100,29 @@ func getGitRemote(composeFileDir string, remote string) (string, error) { return url, nil } +// getGitCurrentBranch gets current git branch name for the current git repo +func getGitCurrentBranch(composeFileDir string) (string, error) { + cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD") + cmd.Dir = composeFileDir + out, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimRight(string(out), "\n"), nil +} + +// getGitRemoteForBranch gets git remote for a branch +func getGitRemoteForBranch(composeFileDir string, branch string) (string, error) { + cmd := exec.Command("sh", "-c", fmt.Sprintf("git branch -r | grep %s", branch)) + cmd.Dir = composeFileDir + + out, err := cmd.Output() + if err != nil { + return "", err + } + return strings.Split(strings.Trim(string(out), "\n "), "/")[0], nil +} + // getComposeFileDir returns compose file directory func getComposeFileDir(inputFile string) (string, error) { if strings.Index(inputFile, "/") != 0 { @@ -302,6 +325,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C var composeFileDir string hasBuild := false buildRepo := opt.BuildRepo + buildBranch := opt.BuildBranch for name, service := range komposeObject.ServiceConfigs { var objects []runtime.Object @@ -326,12 +350,24 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C logrus.Warningf("Error in detecting compose file's directory.") continue } + if !hasGitBinary() && (buildRepo == "" || buildBranch == "") { + logrus.Fatalf("Git is not installed! 
Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") + } + if buildBranch == "" { + buildBranch, err = getGitCurrentBranch(composeFileDir) + if err != nil { + logrus.Fatalf("Buildconfig cannot be created because current git branch couldn't be detected.") + } + } if opt.BuildRepo == "" { - if hasGitBinary() { - buildRepo, err = getGitRemote(composeFileDir, "origin") - if err != nil { - logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") - } + var buildRemote string + buildRemote, err = getGitRemoteForBranch(composeFileDir, buildBranch) + if err != nil { + logrus.Fatalf("Buildconfig cannot be created because remote for current git branch couldn't be detected.") + } + buildRepo, err = getGitRemote(composeFileDir, buildRemote) + if err != nil { + logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") } } hasBuild = true From bbbd4037b41bef71abc3f82d2524b1e8bd5f90dc Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Fri, 2 Dec 2016 15:37:50 +0530 Subject: [PATCH 22/33] Update command to fetch current git remote for buildconfig. --- pkg/transformer/openshift/openshift.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index dbdd9949..67ec982d 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -112,15 +112,19 @@ func getGitCurrentBranch(composeFileDir string) (string, error) { } // getGitRemoteForBranch gets git remote for a branch -func getGitRemoteForBranch(composeFileDir string, branch string) (string, error) { - cmd := exec.Command("sh", "-c", fmt.Sprintf("git branch -r | grep %s", branch)) +func getGitRemoteForCurrentBranch(composeFileDir string) (string, error) { + cmd := exec.Command("sh", "-c", "git for-each-ref --format='%(upstream:short)' $(git symbolic-ref -q HEAD)") cmd.Dir = composeFileDir out, err := cmd.Output() if err != nil { return "", err } - return strings.Split(strings.Trim(string(out), "\n "), "/")[0], nil + output := strings.Trim(string(out), "\n ") + if output == "" { + return "", errors.New("Remote missing for current branch.") + } + return strings.Split(output, "/")[0], nil } // getComposeFileDir returns compose file directory @@ -361,7 +365,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } if opt.BuildRepo == "" { var buildRemote string - buildRemote, err = getGitRemoteForBranch(composeFileDir, buildBranch) + buildRemote, err = getGitRemoteForCurrentBranch(composeFileDir) if err != nil { logrus.Fatalf("Buildconfig cannot be created because remote for current git branch couldn't be detected.") } From f0165d209f3bb82d67005ab86cec8dafe32d5dfe Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Fri, 2 Dec 2016 17:23:06 +0530 Subject: [PATCH 23/33] In buildconfig, get git's current remote URL instead of current branch's remote. 
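The simplification below leans on git ls-remote --get-url with no remote argument, letting git itself resolve the URL for the current branch's remote (falling back to origin) instead of parsing git branch -r output. A hedged sketch of the resulting lookup; currentRemoteURL is an illustrative name, and the .git normalization mirrors the existing helper:

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// currentRemoteURL resolves the remote URL git would use for dir's current
// branch via `git ls-remote --get-url`, then normalizes the .git suffix.
func currentRemoteURL(dir string) (string, error) {
    cmd := exec.Command("git", "ls-remote", "--get-url")
    cmd.Dir = dir
    out, err := cmd.Output()
    if err != nil {
        return "", err
    }
    url := strings.TrimRight(string(out), "\n")
    if !strings.HasSuffix(url, ".git") {
        url += ".git"
    }
    return url, nil
}

func main() {
    url, err := currentRemoteURL(".")
    if err != nil {
        fmt.Println("could not resolve a remote URL:", err)
        return
    }
    fmt.Println("defaulting --build-repo to:", url)
}
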
--- pkg/transformer/openshift/openshift.go | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 67ec982d..8f5af496 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -83,9 +83,9 @@ func hasGitBinary() bool { return err == nil } -// getGitRemote gets git remote URI for the current git repo -func getGitRemote(composeFileDir string, remote string) (string, error) { - cmd := exec.Command("git", "ls-remote", "--get-url", remote) +// getGitCurrentRemoteUrl gets current git remote URI for the current git repo +func getGitCurrentRemoteUrl(composeFileDir string) (string, error) { + cmd := exec.Command("git", "ls-remote", "--get-url") cmd.Dir = composeFileDir out, err := cmd.Output() if err != nil { @@ -111,22 +111,6 @@ func getGitCurrentBranch(composeFileDir string) (string, error) { return strings.TrimRight(string(out), "\n"), nil } -// getGitRemoteForBranch gets git remote for a branch -func getGitRemoteForCurrentBranch(composeFileDir string) (string, error) { - cmd := exec.Command("sh", "-c", "git for-each-ref --format='%(upstream:short)' $(git symbolic-ref -q HEAD)") - cmd.Dir = composeFileDir - - out, err := cmd.Output() - if err != nil { - return "", err - } - output := strings.Trim(string(out), "\n ") - if output == "" { - return "", errors.New("Remote missing for current branch.") - } - return strings.Split(output, "/")[0], nil -} - // getComposeFileDir returns compose file directory func getComposeFileDir(inputFile string) (string, error) { if strings.Index(inputFile, "/") != 0 { @@ -364,12 +348,10 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } } if opt.BuildRepo == "" { - var buildRemote string - buildRemote, err = getGitRemoteForCurrentBranch(composeFileDir) if err != nil { logrus.Fatalf("Buildconfig cannot be created because remote for current git branch couldn't be detected.") } - buildRepo, err = getGitRemote(composeFileDir, buildRemote) + buildRepo, err = getGitCurrentRemoteUrl(composeFileDir) if err != nil { logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.") } From c8d8cbbba946ab184a86f1776793048ff9be8286 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 5 Dec 2016 23:16:27 +0530 Subject: [PATCH 24/33] Added unittests for openshift buildconfig integration. 
--- pkg/test/git.go | 71 +++++++ pkg/transformer/openshift/openshift_test.go | 193 +++++++++++++++++++- 2 files changed, 260 insertions(+), 4 deletions(-) create mode 100644 pkg/test/git.go diff --git a/pkg/test/git.go b/pkg/test/git.go new file mode 100644 index 00000000..d3888bcf --- /dev/null +++ b/pkg/test/git.go @@ -0,0 +1,71 @@ +package test + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "testing" +) + +func NewCommand(cmd string) *exec.Cmd { + return exec.Command("sh", "-c", cmd) +} + +func CreateLocalDirectory(t *testing.T) string { + dir, err := ioutil.TempDir(os.TempDir(), "kompose-test-") + if err != nil { + t.Fatal(err) + } + return dir +} + +func CreateLocalGitDirectory(t *testing.T) string { + dir := CreateLocalDirectory(t) + cmd := NewCommand( + `git init && touch README && + git add README && + git commit -m 'testcommit'`) + cmd.Dir = dir + _, err := cmd.Output() + if err != nil { + fmt.Println("create local git dir", err) + t.Fatal(err) + } + return dir +} + +func SetGitRemote(t *testing.T, dir string, remote string, remoteUrl string) { + cmd := NewCommand("git remote add newremote https://git.test.com/somerepo") + cmd.Dir = dir + _, err := cmd.Output() + if err != nil { + fmt.Println("set git remote", err) + t.Fatal(err) + } +} + +func CreateGitRemoteBranch(t *testing.T, dir string, branch string, remote string) { + cmd := NewCommand( + fmt.Sprintf(`git checkout -b %s && + git config branch.%s.remote %s && + git config branch.%s.merge refs/heads/%s`, + branch, branch, remote, branch, branch)) + cmd.Dir = dir + + _, err := cmd.Output() + if err != nil { + fmt.Println("create git branch", err) + t.Fatal(err) + } +} + +func CreateSubdir(t *testing.T, dir string, subdir string) { + cmd := NewCommand(fmt.Sprintf("mkdir -p %s", subdir)) + cmd.Dir = dir + + _, err := cmd.Output() + if err != nil { + t.Fatal(err) + } +} diff --git a/pkg/transformer/openshift/openshift_test.go b/pkg/transformer/openshift/openshift_test.go index 1772802b..0fcef199 100644 --- a/pkg/transformer/openshift/openshift_test.go +++ b/pkg/transformer/openshift/openshift_test.go @@ -17,11 +17,17 @@ limitations under the License. 
package openshift import ( - "github.com/kubernetes-incubator/kompose/pkg/kobject" - deployapi "github.com/openshift/origin/pkg/deploy/api" + "os" + "path/filepath" + "testing" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/runtime" - "testing" + + deployapi "github.com/openshift/origin/pkg/deploy/api" + + "github.com/kubernetes-incubator/kompose/pkg/kobject" + "github.com/kubernetes-incubator/kompose/pkg/test" ) func newServiceConfig() kobject.ServiceConfig { @@ -118,5 +124,184 @@ func TestKomposeConvertRoute(t *testing.T) { if route.Spec.Host != sc.ExposeService { t.Errorf("Expected %s for Spec.Host, actual %s", sc.ExposeService, route.Spec.Host) } - +} + +func TestGetGitRemote(t *testing.T) { + var output string + var err error + + gitDir := test.CreateLocalGitDirectory(t) + test.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") + test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + dir := test.CreateLocalDirectory(t) + defer os.RemoveAll(gitDir) + defer os.RemoveAll(dir) + + testCases := map[string]struct { + expectError bool + dir string + branch string + output string + }{ + "Get git remote for branch success": {false, gitDir, "newbranch", "https://git.test.com/somerepo.git"}, + "Get git remote error in non git dir": {true, dir, "", ""}, + } + + for name, test := range testCases { + t.Log("Test case: ", name) + output, err = getGitCurrentRemoteUrl(test.dir) + + if test.expectError { + if err == nil { + t.Errorf("Expected error, got success instead!") + } + } else { + if err != nil { + t.Errorf("Expected success, got error: %v", err) + } + if output != test.output { + t.Errorf("Expected: %#v, got: %#v", test.output, output) + } + } + } +} + +func TestGitGetCurrentBranch(t *testing.T) { + var output string + var err error + + gitDir := test.CreateLocalGitDirectory(t) + test.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") + test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + dir := test.CreateLocalDirectory(t) + defer os.RemoveAll(gitDir) + defer os.RemoveAll(dir) + + testCases := map[string]struct { + expectError bool + dir string + output string + }{ + "Get git current branch success": {false, gitDir, "newbranch"}, + "Get git current branch error": {true, dir, ""}, + } + + for name, test := range testCases { + t.Log("Test case: ", name) + output, err = getGitCurrentBranch(test.dir) + + if test.expectError { + if err == nil { + t.Errorf("Expected error, got success instead!") + } + } else { + if err != nil { + t.Errorf("Expected success, got error: %v", err) + } + if output != test.output { + t.Errorf("Expected: %#v, got: %#v", test.output, output) + } + } + } +} + +func TestGetComposeFileDir(t *testing.T) { + var output string + var err error + wd, _ := os.Getwd() + + testCases := map[string]struct { + inputFile string + output string + }{ + "Get compose file dir for relative input file path": {"foo/bar.yaml", filepath.Join(wd, "foo")}, + "Get compose file dir for abs input file path": {"/abs/path/to/compose.yaml", "/abs/path/to"}, + } + + for name, test := range testCases { + t.Log("Test case: ", name) + + output, err = getComposeFileDir(test.inputFile) + + if err != nil { + t.Errorf("Expected success, got error: %#v", err) + } + + if output != test.output { + t.Errorf("Expected output: %#v, got: %#v", test.output, output) + } + } +} + +func TestGetAbsBuildContext(t *testing.T) { + var output string + var err error + + gitDir := test.CreateLocalGitDirectory(t) + test.SetGitRemote(t, gitDir, "newremote", 
"https://git.test.com/somerepo") + test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + test.CreateSubdir(t, gitDir, "a/b") + dir := test.CreateLocalDirectory(t) + defer os.RemoveAll(gitDir) + defer os.RemoveAll(dir) + + testCases := map[string]struct { + expectError bool + context string + composeFileDir string + output string + }{ + "Get abs build context success": {false, "./b/build", filepath.Join(gitDir, "a"), "a/b/build"}, + "Get abs build context error": {true, "", dir, ""}, + } + + for name, test := range testCases { + t.Log("Test case: ", name) + output, err = getAbsBuildContext(test.context, test.composeFileDir) + + if test.expectError { + if err == nil { + t.Errorf("Expected error, got success instead!") + } + } else { + if err != nil { + t.Errorf("Expected success, got error: %v", err) + } + if output != test.output { + t.Errorf("Expected: %#v, got: %#v", test.output, output) + } + } + } +} + +func TestInitBuildConfig(t *testing.T) { + dir := test.CreateLocalGitDirectory(t) + test.CreateSubdir(t, dir, "a/build") + defer os.RemoveAll(dir) + + serviceName := "serviceA" + composeFileDir := filepath.Join(dir, "a") + repo := "https://git.test.com/org/repo" + branch := "somebranch" + sc := kobject.ServiceConfig{ + Build: "./build", + } + bc := initBuildConfig(serviceName, sc, composeFileDir, repo, branch) + + testCases := map[string]struct { + field string + value string + }{ + "Assert buildconfig source git URI": {bc.Spec.CommonSpec.Source.Git.URI, repo}, + "Assert buildconfig source git Ref": {bc.Spec.CommonSpec.Source.Git.Ref, branch}, + "Assert buildconfig source context dir": {bc.Spec.CommonSpec.Source.ContextDir, "a/build"}, + "Assert buildconfig output name": {bc.Spec.CommonSpec.Output.To.Name, serviceName + ":latest"}, + } + + for name, test := range testCases { + t.Log("Test case: ", name) + if test.field != test.value { + t.Errorf("Expected: %#v, got: %#v", test.value, test.field) + } + } } From ec09ca428b6c69b195195460224081c9ec1dc251 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 6 Dec 2016 02:59:33 +0530 Subject: [PATCH 25/33] Generate openshift imagestream when build is absent. --- pkg/transformer/openshift/openshift.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 8f5af496..4db20666 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -140,9 +140,7 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) tag := getImageTag(service.Image) var tags map[string]imageapi.TagReference - if service.Build != "" { - tags = map[string]imageapi.TagReference{} - } else { + if service.Build == "" { tags = map[string]imageapi.TagReference{ tag: imageapi.TagReference{ From: &api.ObjectReference{ From b62fb02571a51730864807833c5f1ddcff6bb017 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 6 Dec 2016 16:16:25 +0530 Subject: [PATCH 26/33] Fix openshift buildconfig info logs based on review. 
--- pkg/transformer/openshift/openshift.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 4db20666..5390ffce 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -337,7 +337,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C continue } if !hasGitBinary() && (buildRepo == "" || buildBranch == "") { - logrus.Fatalf("Git is not installed! Please install Git to create buildconfig, else supply source repository to use for build using '--build-repo' option.") + logrus.Fatalf("Git is not installed! Please install Git to create buildconfig, else supply source repository and branch to use for build using '--build-repo', '--build-branch' options respectively") } if buildBranch == "" { buildBranch, err = getGitCurrentBranch(composeFileDir) From 4ead7156f4ed33fce25fe4924c4ed9e6b2f96812 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 14 Dec 2016 12:56:15 +0530 Subject: [PATCH 27/33] Renamed pkg test to testutils. --- pkg/{test => testutils}/git.go | 2 +- pkg/transformer/openshift/openshift_test.go | 32 ++++++++++----------- 2 files changed, 17 insertions(+), 17 deletions(-) rename pkg/{test => testutils}/git.go (98%) diff --git a/pkg/test/git.go b/pkg/testutils/git.go similarity index 98% rename from pkg/test/git.go rename to pkg/testutils/git.go index d3888bcf..158f9932 100644 --- a/pkg/test/git.go +++ b/pkg/testutils/git.go @@ -1,4 +1,4 @@ -package test +package testutils import ( "fmt" diff --git a/pkg/transformer/openshift/openshift_test.go b/pkg/transformer/openshift/openshift_test.go index 0fcef199..04b53fb0 100644 --- a/pkg/transformer/openshift/openshift_test.go +++ b/pkg/transformer/openshift/openshift_test.go @@ -27,7 +27,7 @@ import ( deployapi "github.com/openshift/origin/pkg/deploy/api" "github.com/kubernetes-incubator/kompose/pkg/kobject" - "github.com/kubernetes-incubator/kompose/pkg/test" + "github.com/kubernetes-incubator/kompose/pkg/testutils" ) func newServiceConfig() kobject.ServiceConfig { @@ -130,10 +130,10 @@ func TestGetGitRemote(t *testing.T) { var output string var err error - gitDir := test.CreateLocalGitDirectory(t) - test.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") - test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") - dir := test.CreateLocalDirectory(t) + gitDir := testutils.CreateLocalGitDirectory(t) + testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") + testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + dir := testutils.CreateLocalDirectory(t) defer os.RemoveAll(gitDir) defer os.RemoveAll(dir) @@ -170,10 +170,10 @@ func TestGitGetCurrentBranch(t *testing.T) { var output string var err error - gitDir := test.CreateLocalGitDirectory(t) - test.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") - test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") - dir := test.CreateLocalDirectory(t) + gitDir := testutils.CreateLocalGitDirectory(t) + testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") + testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + dir := testutils.CreateLocalDirectory(t) defer os.RemoveAll(gitDir) defer os.RemoveAll(dir) @@ -237,11 +237,11 @@ func TestGetAbsBuildContext(t *testing.T) { var output string var err error - gitDir := test.CreateLocalGitDirectory(t) - test.SetGitRemote(t, gitDir, "newremote", 
"https://git.test.com/somerepo") - test.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") - test.CreateSubdir(t, gitDir, "a/b") - dir := test.CreateLocalDirectory(t) + gitDir := testutils.CreateLocalGitDirectory(t) + testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo") + testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote") + testutils.CreateSubdir(t, gitDir, "a/b") + dir := testutils.CreateLocalDirectory(t) defer os.RemoveAll(gitDir) defer os.RemoveAll(dir) @@ -275,8 +275,8 @@ func TestGetAbsBuildContext(t *testing.T) { } func TestInitBuildConfig(t *testing.T) { - dir := test.CreateLocalGitDirectory(t) - test.CreateSubdir(t, dir, "a/build") + dir := testutils.CreateLocalGitDirectory(t) + testutils.CreateSubdir(t, dir, "a/build") defer os.RemoveAll(dir) serviceName := "serviceA" From a753f35d4250270d661a8f3092234211d72b022a Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 14 Dec 2016 13:05:45 +0530 Subject: [PATCH 28/33] Added docs for openshift unit tests. --- pkg/transformer/openshift/openshift_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/transformer/openshift/openshift_test.go b/pkg/transformer/openshift/openshift_test.go index 04b53fb0..6b9e5720 100644 --- a/pkg/transformer/openshift/openshift_test.go +++ b/pkg/transformer/openshift/openshift_test.go @@ -126,6 +126,7 @@ func TestKomposeConvertRoute(t *testing.T) { } } +//Test getting git remote url for a directory func TestGetGitRemote(t *testing.T) { var output string var err error @@ -166,6 +167,7 @@ func TestGetGitRemote(t *testing.T) { } } +// Test getting current git branch in a directory func TestGitGetCurrentBranch(t *testing.T) { var output string var err error @@ -205,6 +207,7 @@ func TestGitGetCurrentBranch(t *testing.T) { } } +// Test getting compose file directory path: relative to project dir or absolute path func TestGetComposeFileDir(t *testing.T) { var output string var err error @@ -233,6 +236,7 @@ func TestGetComposeFileDir(t *testing.T) { } } +// Test getting build context relative to project's root dir func TestGetAbsBuildContext(t *testing.T) { var output string var err error @@ -274,6 +278,7 @@ func TestGetAbsBuildContext(t *testing.T) { } } +// Test initializing buildconfig for a service func TestInitBuildConfig(t *testing.T) { dir := testutils.CreateLocalGitDirectory(t) testutils.CreateSubdir(t, dir, "a/build") From d4561c6f51241ff204132d713e19732f59ff40f0 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 22 Dec 2016 23:58:10 +0530 Subject: [PATCH 29/33] Updated glide dependencies Added: - github.com/openshift/origin/pkg/build/api/install - github.com/openshift/origin/pkg/build/api/v1 --- glide.lock | 8 +- glide.yaml | 2 + .../origin/pkg/build/api/v1/generated.pb.go | 1260 +++++++++++++---- .../origin/pkg/build/api/v1/generated.proto | 776 ---------- .../origin/pkg/build/api/v1/swagger_doc.go | 41 +- .../origin/pkg/build/api/v1/types.go | 58 +- .../build/api/v1/zz_generated.conversion.go | 96 +- .../pkg/build/api/v1/zz_generated.deepcopy.go | 74 +- .../origin/pkg/build/client/clients.go | 109 ++ .../openshift/origin/pkg/build/util/doc.go | 3 + .../openshift/origin/pkg/build/util/util.go | 152 ++ 11 files changed, 1466 insertions(+), 1113 deletions(-) delete mode 100644 vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto create mode 100644 vendor/github.com/openshift/origin/pkg/build/client/clients.go create mode 100644 vendor/github.com/openshift/origin/pkg/build/util/doc.go create mode 100644 
vendor/github.com/openshift/origin/pkg/build/util/util.go diff --git a/glide.lock b/glide.lock index e870529e..8eea6d7c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: ce7cedb6d1c0e23f02afdd8bc7d1dabb047ca846cacd597e436cf93549c9ac79 -updated: 2016-12-22T09:07:17.203828556-05:00 +hash: c7cb14f4249738a47020f9dc1964832921c3f5b8bf5a1c50f5b2fa15eaebb6fe +updated: 2016-12-26T10:22:49.439519344+05:30 imports: - name: cloud.google.com/go version: 3b1ae45394a234c385be014e9a488f2bb6eef821 @@ -369,6 +369,10 @@ imports: - pkg/auth/authenticator/request/x509request - pkg/authorization/api - pkg/build/api + - pkg/build/api/install + - pkg/build/api/v1 + - pkg/build/client + - pkg/build/util - pkg/client - pkg/cmd/cli/config - pkg/cmd/util diff --git a/glide.yaml b/glide.yaml index aa23c7af..491060be 100644 --- a/glide.yaml +++ b/glide.yaml @@ -20,6 +20,8 @@ import: - package: github.com/openshift/origin version: v1.4.0-rc1 subpackages: + - pkg/build/api/install + - pkg/build/api/v1 - pkg/client - pkg/cmd/cli/config - pkg/deploy/api diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go index 2cd5d28c..034e7a50 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.pb.go @@ -39,9 +39,12 @@ GitSourceRevision ImageChangeCause ImageChangeTrigger + ImageLabel ImageSource ImageSourcePath JenkinsPipelineBuildStrategy + OptionalNodeSelector + ProxyConfig SecretBuildSource SecretSpec SourceBuildStrategy @@ -196,43 +199,55 @@ func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigge func (*ImageChangeTrigger) ProtoMessage() {} func (*ImageChangeTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (m *ImageLabel) Reset() { *m = ImageLabel{} } +func (*ImageLabel) ProtoMessage() {} +func (*ImageLabel) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + func (m *ImageSource) Reset() { *m = ImageSource{} } func (*ImageSource) ProtoMessage() {} -func (*ImageSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*ImageSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} } func (*ImageSourcePath) ProtoMessage() {} -func (*ImageSourcePath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*ImageSourcePath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} } func (*JenkinsPipelineBuildStrategy) ProtoMessage() {} func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptorGenerated, []int{33} } +func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} } +func (*OptionalNodeSelector) ProtoMessage() {} +func (*OptionalNodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *ProxyConfig) Reset() { *m = ProxyConfig{} } +func (*ProxyConfig) ProtoMessage() {} +func (*ProxyConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} } func (*SecretBuildSource) ProtoMessage() {} -func (*SecretBuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func 
(*SecretBuildSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *SecretSpec) Reset() { *m = SecretSpec{} } func (*SecretSpec) ProtoMessage() {} -func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} } func (*SourceBuildStrategy) ProtoMessage() {} -func (*SourceBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*SourceBuildStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *SourceControlUser) Reset() { *m = SourceControlUser{} } func (*SourceControlUser) ProtoMessage() {} -func (*SourceControlUser) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*SourceControlUser) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *SourceRevision) Reset() { *m = SourceRevision{} } func (*SourceRevision) ProtoMessage() {} -func (*SourceRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*SourceRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} } func (*WebHookTrigger) ProtoMessage() {} -func (*WebHookTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*WebHookTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func init() { proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.origin.pkg.build.api.v1.BinaryBuildRequestOptions") @@ -265,9 +280,12 @@ func init() { proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.origin.pkg.build.api.v1.GitSourceRevision") proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageChangeCause") proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageChangeTrigger") + proto.RegisterType((*ImageLabel)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageLabel") proto.RegisterType((*ImageSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageSource") proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.origin.pkg.build.api.v1.ImageSourcePath") proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.JenkinsPipelineBuildStrategy") + proto.RegisterType((*OptionalNodeSelector)(nil), "github.com.openshift.origin.pkg.build.api.v1.OptionalNodeSelector") + proto.RegisterType((*ProxyConfig)(nil), "github.com.openshift.origin.pkg.build.api.v1.ProxyConfig") proto.RegisterType((*SecretBuildSource)(nil), "github.com.openshift.origin.pkg.build.api.v1.SecretBuildSource") proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.origin.pkg.build.api.v1.SecretSpec") proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.origin.pkg.build.api.v1.SourceBuildStrategy") @@ -711,6 +729,18 @@ func (m *BuildOutput) MarshalTo(data []byte) (int, error) { } i += n13 } + if len(m.ImageLabels) > 0 { + for _, msg := range m.ImageLabels { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1300,6 +1330,16 @@ func (m *CommonSpec) MarshalTo(data []byte) (int, error) { i++ i = 
encodeVarintGenerated(data, i, uint64(*m.CompletionDeadlineSeconds)) } + if m.NodeSelector != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeSelector.Size())) + n42, err := m.NodeSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + } return i, nil } @@ -1321,20 +1361,20 @@ func (m *CustomBuildStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n42, err := m.From.MarshalTo(data[i:]) + n43, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n42 + i += n43 if m.PullSecret != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) - n43, err := m.PullSecret.MarshalTo(data[i:]) + n44, err := m.PullSecret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n43 + i += n44 } if len(m.Env) > 0 { for _, msg := range m.Env { @@ -1402,21 +1442,21 @@ func (m *DockerBuildStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n44, err := m.From.MarshalTo(data[i:]) + n45, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n44 + i += n45 } if m.PullSecret != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) - n45, err := m.PullSecret.MarshalTo(data[i:]) + n46, err := m.PullSecret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n45 + i += n46 } data[i] = 0x18 i++ @@ -1472,11 +1512,11 @@ func (m *GenericWebHookCause) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) - n46, err := m.Revision.MarshalTo(data[i:]) + n47, err := m.Revision.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n46 + i += n47 } data[i] = 0x12 i++ @@ -1508,11 +1548,11 @@ func (m *GenericWebHookEvent) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Git.Size())) - n47, err := m.Git.MarshalTo(data[i:]) + n48, err := m.Git.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n47 + i += n48 } if len(m.Env) > 0 { for _, msg := range m.Env { @@ -1552,18 +1592,14 @@ func (m *GitBuildSource) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintGenerated(data, i, uint64(len(m.Ref))) i += copy(data[i:], m.Ref) - if m.HTTPProxy != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPProxy))) - i += copy(data[i:], *m.HTTPProxy) - } - if m.HTTPSProxy != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPSProxy))) - i += copy(data[i:], *m.HTTPSProxy) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ProxyConfig.Size())) + n49, err := m.ProxyConfig.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n49 return i, nil } @@ -1586,11 +1622,11 @@ func (m *GitHubWebHookCause) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Revision.Size())) - n48, err := m.Revision.MarshalTo(data[i:]) + n50, err := m.Revision.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n48 + i += n50 } data[i] = 0x12 i++ @@ -1617,19 +1653,19 @@ func (m *GitInfo) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.GitBuildSource.Size())) - n49, err := m.GitBuildSource.MarshalTo(data[i:]) + n51, err := m.GitBuildSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n49 + i += n51 data[i] = 0x12 i++ i = 
encodeVarintGenerated(data, i, uint64(m.GitSourceRevision.Size())) - n50, err := m.GitSourceRevision.MarshalTo(data[i:]) + n52, err := m.GitSourceRevision.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n50 + i += n52 return i, nil } @@ -1655,19 +1691,19 @@ func (m *GitSourceRevision) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Author.Size())) - n51, err := m.Author.MarshalTo(data[i:]) + n53, err := m.Author.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n51 + i += n53 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Committer.Size())) - n52, err := m.Committer.MarshalTo(data[i:]) + n54, err := m.Committer.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n52 + i += n54 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Message))) @@ -1698,11 +1734,11 @@ func (m *ImageChangeCause) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.FromRef.Size())) - n53, err := m.FromRef.MarshalTo(data[i:]) + n55, err := m.FromRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n53 + i += n55 } return i, nil } @@ -1730,15 +1766,41 @@ func (m *ImageChangeTrigger) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n54, err := m.From.MarshalTo(data[i:]) + n56, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n54 + i += n56 } return i, nil } +func (m *ImageLabel) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ImageLabel) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + func (m *ImageSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -1757,11 +1819,11 @@ func (m *ImageSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n55, err := m.From.MarshalTo(data[i:]) + n57, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n55 + i += n57 if len(m.Paths) > 0 { for _, msg := range m.Paths { data[i] = 0x12 @@ -1778,11 +1840,11 @@ func (m *ImageSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) - n56, err := m.PullSecret.MarshalTo(data[i:]) + n58, err := m.PullSecret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n56 + i += n58 } return i, nil } @@ -1839,6 +1901,77 @@ func (m *JenkinsPipelineBuildStrategy) MarshalTo(data []byte) (int, error) { return i, nil } +func (m OptionalNodeSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m OptionalNodeSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for k := range m { + data[i] = 0xa + i++ + v := m[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *ProxyConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ProxyConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTPProxy != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPProxy))) + i += copy(data[i:], *m.HTTPProxy) + } + if m.HTTPSProxy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.HTTPSProxy))) + i += copy(data[i:], *m.HTTPSProxy) + } + if m.NoProxy != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.NoProxy))) + i += copy(data[i:], *m.NoProxy) + } + return i, nil +} + func (m *SecretBuildSource) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -1857,11 +1990,11 @@ func (m *SecretBuildSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n57, err := m.Secret.MarshalTo(data[i:]) + n59, err := m.Secret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n57 + i += n59 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.DestinationDir))) @@ -1887,11 +2020,11 @@ func (m *SecretSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.SecretSource.Size())) - n58, err := m.SecretSource.MarshalTo(data[i:]) + n60, err := m.SecretSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n58 + i += n60 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) @@ -1917,20 +2050,20 @@ func (m *SourceBuildStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n59, err := m.From.MarshalTo(data[i:]) + n61, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n59 + i += n61 if m.PullSecret != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PullSecret.Size())) - n60, err := m.PullSecret.MarshalTo(data[i:]) + n62, err := m.PullSecret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n60 + i += n62 } if len(m.Env) > 0 { for _, msg := range m.Env { @@ -1970,11 +2103,11 @@ func (m *SourceBuildStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.RuntimeImage.Size())) - n61, err := m.RuntimeImage.MarshalTo(data[i:]) + n63, err := m.RuntimeImage.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n61 + i += n63 } if len(m.RuntimeArtifacts) > 0 { for _, msg := range m.RuntimeArtifacts { @@ -2040,11 +2173,11 @@ func (m *SourceRevision) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Git.Size())) - n62, err := m.Git.MarshalTo(data[i:]) + n64, err := m.Git.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n62 + i += n64 } return i, nil } @@ -2256,6 +2389,12 @@ func (m *BuildOutput) Size() (n int) { l = m.PushSecret.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ImageLabels) > 0 { + for _, e := range m.ImageLabels { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -2484,6 +2623,10 @@ func (m *CommonSpec) Size() (n int) { if 
m.CompletionDeadlineSeconds != nil { n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2576,14 +2719,8 @@ func (m *GitBuildSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Ref) n += 1 + l + sovGenerated(uint64(l)) - if m.HTTPProxy != nil { - l = len(*m.HTTPProxy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTPSProxy != nil { - l = len(*m.HTTPSProxy) - n += 1 + l + sovGenerated(uint64(l)) - } + l = m.ProxyConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2647,6 +2784,16 @@ func (m *ImageChangeTrigger) Size() (n int) { return n } +func (m *ImageLabel) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ImageSource) Size() (n int) { var l int _ = l @@ -2685,6 +2832,38 @@ func (m *JenkinsPipelineBuildStrategy) Size() (n int) { return n } +func (m OptionalNodeSelector) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for k, v := range m { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ProxyConfig) Size() (n int) { + var l int + _ = l + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NoProxy != nil { + l = len(*m.NoProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *SecretBuildSource) Size() (n int) { var l int _ = l @@ -2913,6 +3092,7 @@ func (this *BuildOutput) String() string { s := strings.Join([]string{`&BuildOutput{`, `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1) + `,`, `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, + `ImageLabels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImageLabels), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3045,6 +3225,7 @@ func (this *CommonSpec) String() string { `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "k8s_io_kubernetes_pkg_api_v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", "OptionalNodeSelector", 1) + `,`, `}`, }, "") return s @@ -3110,8 +3291,7 @@ func (this *GitBuildSource) String() string { s := strings.Join([]string{`&GitBuildSource{`, `URI:` + fmt.Sprintf("%v", this.URI) + `,`, `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, - `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3173,6 +3353,17 @@ func (this *ImageChangeTrigger) String() string { }, "") return s } +func (this *ImageLabel) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLabel{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} func (this *ImageSource) String() string { if this == nil { return "nil" @@ -3207,6 +3398,18 @@ func (this *JenkinsPipelineBuildStrategy) String() string { }, "") return s } +func (this *ProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyConfig{`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`, + `}`, + }, "") + return s +} func (this *SecretBuildSource) String() string { if this == nil { return "nil" @@ -4777,6 +4980,37 @@ func (m *BuildOutput) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLabels = append(m.ImageLabels, ImageLabel{}) + if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -6771,6 +7005,39 @@ func (m *CommonSpec) Unmarshal(data []byte) error { } } m.CompletionDeadlineSeconds = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = OptionalNodeSelector{} + } + if err := m.NodeSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -7596,9 +7863,9 @@ func (m *GitBuildSource) Unmarshal(data []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7608,51 +7875,21 @@ func (m *GitBuildSource) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) - m.HTTPProxy = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + if err := m.ProxyConfig.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.HTTPSProxy = &s iNdEx = postIndex default: iNdEx = preIndex @@ -8289,6 +8526,114 @@ func (m *ImageChangeTrigger) Unmarshal(data []byte) error { } return nil } +func (m *ImageLabel) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ImageSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -8649,6 +8994,307 @@ func (m *JenkinsPipelineBuildStrategy) Unmarshal(data []byte) error { } return nil } +func (m *OptionalNodeSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if *m == nil { + *m = make(map[string]string) + } + (*m)[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.NoProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SecretBuildSource) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -9570,196 +10216,208 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 3054 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, - 0x15, 0xce, 0x92, 0x14, 0x45, 0x0d, 0xf5, 0x3b, 0x72, 0x62, 0x59, 0x49, 0x1c, 0x67, 0x93, 0x16, - 0x29, 0x62, 0x93, 0x90, 0x12, 0xa7, 0x0e, 0x12, 0xbb, 0x16, 0x29, 0xff, 0x48, 0x95, 0x6d, 0xf5, - 0x49, 0x89, 0x53, 0x17, 0x6d, 0xb1, 0xa2, 0x86, 0xd4, 0x5a, 0xe4, 0x2e, 0xbb, 0xbb, 0x54, 0x2c, - 0xa0, 0x05, 0x82, 0x16, 0x05, 0xd2, 0x5b, 0xd1, 0xf6, 0xd0, 0x4b, 0x5b, 0x04, 0xe8, 0xcf, 0x21, - 0x87, 0xa2, 0x3f, 0x87, 0x00, 0x3d, 0x15, 0xe8, 0xc1, 0x47, 0x1f, 0x7b, 0x0a, 0x9a, 0xe4, 0xd0, - 0x73, 0xaf, 0x39, 0x75, 0x7e, 0xde, 0xee, 0xce, 0x2e, 0x29, 0xc5, 0x5c, 0x59, 0x46, 0x81, 0x1e, - 0x28, 0x68, 0xdf, 0xbc, 0xf9, 0xde, 0xcc, 0x9b, 0x37, 0xef, 0x6f, 0x97, 0xbc, 0xd9, 0xb2, 0x83, - 0x9d, 0xde, 0x56, 0xa5, 
0xe1, 0x76, 0xaa, 0x6e, 0x97, 0x39, 0xfe, 0x8e, 0xdd, 0x0c, 0xaa, 0xae, - 0x67, 0xb7, 0x6c, 0xa7, 0xda, 0xdd, 0x6d, 0x55, 0xb7, 0x7a, 0x76, 0x7b, 0xbb, 0x6a, 0x75, 0xed, - 0xea, 0xde, 0x42, 0xb5, 0xc5, 0x1c, 0xe6, 0x59, 0x01, 0xdb, 0xae, 0x74, 0x3d, 0x37, 0x70, 0xe9, - 0xd9, 0x78, 0x76, 0x25, 0x9a, 0x5d, 0x51, 0xb3, 0x2b, 0x7c, 0x76, 0x45, 0xce, 0xae, 0xf0, 0xd9, - 0x95, 0xbd, 0x85, 0xf9, 0x73, 0x9a, 0xac, 0x96, 0xdb, 0x72, 0xab, 0x12, 0x64, 0xab, 0xd7, 0x94, - 0x4f, 0xf2, 0x41, 0xfe, 0xa7, 0xc0, 0xe7, 0xcf, 0xef, 0x5e, 0xf0, 0x2b, 0xb6, 0x5b, 0xdd, 0xed, - 0x6d, 0x31, 0xcf, 0x61, 0x01, 0xf3, 0xe5, 0x82, 0xc4, 0x52, 0x7a, 0xce, 0x1e, 0xf3, 0x7c, 0xdb, - 0x75, 0xd8, 0x76, 0x7a, 0x4d, 0xf3, 0x67, 0x0f, 0x9e, 0xd6, 0xbf, 0x83, 0xf9, 0x73, 0x83, 0xb9, - 0xbd, 0x9e, 0x13, 0xd8, 0x1d, 0xd6, 0xc7, 0xbe, 0x30, 0x98, 0xbd, 0x17, 0xd8, 0xed, 0xaa, 0xed, - 0x04, 0x7e, 0xe0, 0xa5, 0xa7, 0x98, 0x1f, 0x15, 0xc8, 0xa9, 0x9a, 0xed, 0x58, 0xde, 0x7e, 0x4d, - 0x28, 0x03, 0xd8, 0xf7, 0x7a, 0xcc, 0x0f, 0x6e, 0x75, 0x03, 0xbe, 0x7c, 0x9f, 0xbe, 0x43, 0x4a, - 0x1d, 0x16, 0x58, 0xdb, 0x56, 0x60, 0xcd, 0x19, 0x67, 0x8c, 0x97, 0xca, 0x8b, 0x2f, 0x55, 0x94, - 0x8c, 0x4a, 0x2c, 0x43, 0xaa, 0x52, 0x29, 0xb1, 0x72, 0x6b, 0xeb, 0x2e, 0x6b, 0x04, 0x37, 0xf8, - 0x9c, 0x1a, 0xbd, 0xff, 0xf1, 0x73, 0x4f, 0x7c, 0xfa, 0xf1, 0x73, 0x24, 0xa6, 0x41, 0x84, 0x46, - 0xbf, 0x4c, 0x8a, 0x96, 0x7f, 0xd5, 0x6e, 0xb3, 0xb9, 0x1c, 0xc7, 0x1d, 0xab, 0x4d, 0x22, 0x77, - 0x71, 0x49, 0x52, 0x01, 0x47, 0xe9, 0x6b, 0x64, 0xd2, 0x63, 0x7b, 0xb6, 0xd0, 0x66, 0xdd, 0xed, - 0x74, 0xec, 0x60, 0x2e, 0x9f, 0xe4, 0x57, 0x54, 0x48, 0x71, 0xd1, 0xd7, 0xc9, 0x54, 0x48, 0xb9, - 0xc1, 0x7c, 0xdf, 0x6a, 0xb1, 0xb9, 0x82, 0x9c, 0x38, 0x85, 0x13, 0x47, 0x91, 0x0c, 0x69, 0x3e, - 0x5a, 0x23, 0x34, 0x24, 0x2d, 0xf5, 0x82, 0x1d, 0xd7, 0xbb, 0x69, 0x75, 0xd8, 0xdc, 0x88, 0x9c, - 0x1d, 0x6d, 0x2a, 0x1e, 0x81, 0x01, 0xdc, 0xf4, 0x0a, 0x99, 0x4d, 0x52, 0xaf, 0x74, 0x2c, 0xbb, - 0x3d, 0x57, 0x94, 0x20, 0xb3, 0x08, 0x52, 0xd6, 0x86, 0x60, 0x10, 0x3f, 0xfd, 0x3a, 0x79, 0x32, - 0xb9, 0xaf, 0x80, 0xa9, 0xd5, 0x8c, 0x4a, 0xa0, 0x27, 0x11, 0x68, 0x22, 0x31, 0x08, 0x83, 0xe7, - 0xd0, 0x9b, 0xe4, 0xa9, 0xbe, 0x01, 0xb5, 0xac, 0x92, 0x44, 0x7b, 0x0a, 0xd1, 0x26, 0x93, 0xa3, - 0x70, 0xc0, 0x2c, 0xf3, 0x0d, 0x32, 0xa3, 0x59, 0xce, 0x86, 0xdb, 0xf3, 0x1a, 0x4c, 0x3b, 0x57, - 0xe3, 0xb0, 0x73, 0x35, 0x7f, 0x93, 0x23, 0x23, 0x72, 0xde, 0x31, 0xda, 0xd8, 0x37, 0x49, 0xc1, - 0xef, 0xb2, 0x86, 0xb4, 0xb0, 0xf2, 0xe2, 0x57, 0x2b, 0xc3, 0xb8, 0x83, 0x8a, 0xda, 0x14, 0x9f, - 0x5e, 0x1b, 0x47, 0x21, 0x05, 0xf1, 0x04, 0x12, 0x92, 0x5a, 0xa4, 0xe8, 0x07, 0x56, 0xd0, 0xf3, - 0xa5, 0x39, 0x96, 0x17, 0x5f, 0xcf, 0x02, 0x2e, 0x01, 0x62, 0x0d, 0xa9, 0x67, 0x40, 0x60, 0xf3, - 0x4f, 0x39, 0x52, 0x96, 0x7c, 0x75, 0xd7, 0x69, 0xda, 0xad, 0x63, 0xd4, 0xd3, 0x77, 0x13, 0x7a, - 0xba, 0x98, 0x61, 0x2b, 0x6a, 0x89, 0x07, 0x6a, 0xab, 0x95, 0xd2, 0xd6, 0xd7, 0xb2, 0x8b, 0x38, - 0x5c, 0x67, 0x0f, 0x0c, 0x32, 0xa5, 0x71, 0xaf, 0xd9, 0x7e, 0x40, 0xbf, 0xdd, 0xa7, 0xb7, 0xea, - 0x21, 0x7a, 0xd3, 0x7c, 0x77, 0x45, 0x4c, 0x97, 0xea, 0x9b, 0x46, 0x71, 0xa5, 0x90, 0xa2, 0x29, - 0xef, 0x3b, 0x64, 0xc4, 0x0e, 0x58, 0xc7, 0xe7, 0xda, 0xcb, 0x67, 0x34, 0x04, 0xb5, 0xd8, 0xda, - 0x04, 0x4a, 0x19, 0x59, 0x11, 0x78, 0xa0, 0x60, 0xcd, 0x3f, 0xe7, 0x12, 0x5b, 0x12, 0x5a, 0xa5, - 0x0e, 0x29, 0x05, 0x1c, 0xb0, 0xc5, 0x57, 0xca, 0xb7, 0x24, 0xc4, 0x5e, 0xce, 0x20, 0x76, 0x53, - 0x41, 0xac, 0xbb, 0x6d, 0xbb, 0xb1, 0x1f, 0xef, 0x11, 0xc9, 0x3e, 0x44, 0x32, 0xe8, 0x12, 0x19, - 0xe3, 0x21, 0x47, 0x31, 0xa2, 0xbf, 0x7e, 0x01, 
0xd9, 0xc7, 0x20, 0x1c, 0xf8, 0x9c, 0x7b, 0x0e, - 0x15, 0x43, 0x42, 0x0a, 0xc4, 0xb3, 0x68, 0x9b, 0x10, 0xbe, 0xb4, 0x8e, 0xeb, 0x88, 0x0d, 0xa0, - 0x19, 0x5c, 0x18, 0x6e, 0xd1, 0xf5, 0x68, 0x7e, 0x6c, 0xcf, 0x31, 0x0d, 0x34, 0x7c, 0x73, 0x95, - 0xbb, 0xa6, 0xb4, 0xd1, 0xd0, 0xf3, 0xa4, 0xdc, 0xb6, 0xfc, 0xe0, 0x6d, 0x75, 0xbe, 0xd2, 0x16, - 0xf2, 0xb1, 0x2f, 0x5e, 0x8b, 0x87, 0x40, 0xe7, 0x33, 0xff, 0x61, 0x90, 0x31, 0x09, 0xf6, 0x38, - 0xac, 0xe9, 0x9d, 0xa4, 0x35, 0xbd, 0x92, 0xe1, 0x58, 0x0f, 0xb0, 0x23, 0x42, 0x4a, 0x6a, 0x17, - 0x6e, 0xcb, 0x7c, 0xbf, 0x80, 0x36, 0xc5, 0x1f, 0xc2, 0x50, 0x5f, 0x25, 0x63, 0x0d, 0xd7, 0x09, - 0x2c, 0x9b, 0xe7, 0x07, 0xe8, 0xbb, 0x67, 0xc2, 0x33, 0xae, 0x87, 0x03, 0x10, 0xf3, 0x08, 0x4f, - 0xdf, 0x74, 0xdb, 0x6d, 0xf7, 0x5d, 0x69, 0x11, 0xa5, 0xf8, 0x4e, 0x5e, 0x95, 0x54, 0xc0, 0x51, - 0x7a, 0x96, 0x94, 0xba, 0x22, 0x82, 0xb8, 0x78, 0xfd, 0x4b, 0xb1, 0x02, 0xd6, 0x91, 0x0e, 0x11, - 0x07, 0x7d, 0x95, 0x8c, 0xfb, 0xb6, 0xd3, 0x60, 0x1b, 0x8c, 0x4b, 0xda, 0xf6, 0x65, 0xd0, 0xce, - 0xd7, 0xa6, 0x39, 0xf7, 0xf8, 0x86, 0x46, 0x87, 0x04, 0x17, 0x57, 0xdb, 0x98, 0x7c, 0xde, 0xb4, - 0x31, 0x52, 0x97, 0x17, 0x5f, 0x7e, 0xc8, 0x63, 0x11, 0x53, 0x6a, 0x13, 0x62, 0x97, 0x1b, 0x21, - 0x02, 0xc4, 0x60, 0x74, 0x91, 0x10, 0x91, 0x6a, 0x71, 0xff, 0xd2, 0xe9, 0xfa, 0x32, 0x7e, 0x97, - 0x62, 0xeb, 0xdb, 0x8c, 0x46, 0x40, 0xe3, 0xa2, 0x2f, 0x93, 0x31, 0xae, 0xa3, 0xf6, 0x1a, 0x57, - 0x93, 0x2f, 0x23, 0x75, 0x5e, 0x09, 0xd8, 0x0c, 0x89, 0x10, 0x8f, 0xd3, 0x0a, 0x21, 0x6d, 0x9b, - 0x87, 0xd5, 0xda, 0x3e, 0x5f, 0xa1, 0x8c, 0xc4, 0xf9, 0xda, 0xa4, 0x00, 0x5f, 0x8b, 0xa8, 0xa0, - 0x71, 0x08, 0xb5, 0x3b, 0xee, 0xbb, 0x16, 0x4f, 0x84, 0xc6, 0x92, 0x6a, 0xbf, 0xe9, 0xde, 0xe6, - 0x54, 0xc0, 0x51, 0xfa, 0x25, 0x32, 0x8a, 0x9b, 0x9c, 0x23, 0x12, 0xb4, 0x2c, 0x92, 0x9e, 0xd0, - 0xc2, 0xc3, 0x31, 0xf3, 0x23, 0x03, 0xa3, 0xcc, 0xad, 0x5e, 0xd0, 0xed, 0x05, 0x3c, 0x71, 0xc9, - 0x05, 0x2e, 0x5a, 0xf6, 0xb9, 0x87, 0x89, 0x2f, 0xc0, 0x9a, 0xcc, 0x63, 0x5c, 0x5d, 0xb5, 0x22, - 0x17, 0x90, 0xdb, 0x74, 0x81, 0x03, 0xd0, 0x2d, 0x42, 0xba, 0x3d, 0x7f, 0x87, 0x9f, 0x8f, 0xc7, - 0x02, 0x0c, 0x2c, 0x8b, 0x87, 0xc3, 0xad, 0xb9, 0x0d, 0xab, 0x9d, 0xc6, 0x94, 0x9a, 0x58, 0x8f, - 0x90, 0x40, 0x43, 0x35, 0x7f, 0x68, 0x90, 0x59, 0xb9, 0xf4, 0x75, 0xd7, 0x0f, 0x54, 0x6e, 0x22, - 0xbd, 0x23, 0xdf, 0xb9, 0x70, 0x05, 0x96, 0xb3, 0x2d, 0x9d, 0xe3, 0x98, 0xda, 0x79, 0x5d, 0x91, - 0x20, 0x1c, 0xa3, 0xcf, 0x90, 0x82, 0xe5, 0xb5, 0xd4, 0x4d, 0x1b, 0xab, 0x95, 0x44, 0xc8, 0x5a, - 0xe2, 0xcf, 0x20, 0xa9, 0x42, 0xcd, 0x7e, 0xc3, 0xb3, 0xbb, 0x7d, 0xf9, 0xe6, 0x86, 0xa4, 0x02, - 0x8e, 0x9a, 0x9f, 0x8d, 0x90, 0x71, 0x3d, 0x73, 0x3e, 0xc6, 0x30, 0xdd, 0x24, 0xa5, 0x30, 0x13, - 0x43, 0x8d, 0xbe, 0x39, 0x9c, 0x7b, 0x50, 0x29, 0x1a, 0x20, 0x46, 0x6d, 0x5c, 0x5c, 0xc1, 0xf0, - 0x09, 0x22, 0x6c, 0xea, 0x92, 0x69, 0xf4, 0xfc, 0x6c, 0xbb, 0xb6, 0xbf, 0xd2, 0x11, 0xb9, 0x73, - 0x3e, 0x8b, 0x41, 0x9c, 0xe0, 0x02, 0xa6, 0x37, 0x53, 0x50, 0xd0, 0x07, 0xce, 0xb3, 0xdc, 0x42, - 0xd3, 0x73, 0x3b, 0xf2, 0xae, 0x0f, 0x2d, 0x44, 0x1e, 0xdc, 0x55, 0x3e, 0x1d, 0x24, 0x08, 0x6d, - 0x90, 0xe2, 0x96, 0xcc, 0x4a, 0xd1, 0x0f, 0x0c, 0x9b, 0x6b, 0xa4, 0x33, 0xda, 0x1a, 0x11, 0xa7, - 0xae, 0xc8, 0x80, 0xd0, 0x74, 0x21, 0x19, 0x4a, 0x8a, 0xf2, 0x82, 0x4d, 0x1d, 0x16, 0x46, 0x68, - 0x9d, 0xe4, 0x99, 0xb3, 0xc7, 0xdd, 0x81, 0xf0, 0xeb, 0x2f, 0x1e, 0xbe, 0xc7, 0x2b, 0xce, 0xde, - 0xdb, 0x96, 0x57, 0x2b, 0xa3, 0x39, 0xe4, 0xf9, 0x33, 0x88, 0xd9, 0x74, 0x8f, 0x94, 0x35, 0xed, - 0x71, 0x6f, 0x91, 0xcf, 0x98, 0x4d, 0xe1, 0xa9, 0xd4, 0xad, 0x9e, 0xcf, 
0xe2, 0x18, 0xa8, 0x9d, - 0x15, 0xe8, 0x82, 0xcc, 0x5f, 0x8d, 0xa0, 0x97, 0xc0, 0x2c, 0xff, 0x15, 0x52, 0x08, 0xf6, 0xbb, - 0x61, 0x8e, 0xff, 0x5c, 0x98, 0xf2, 0x6d, 0x72, 0x1a, 0x4f, 0x03, 0xa6, 0x34, 0x56, 0x41, 0x02, - 0xc9, 0xac, 0x9d, 0x4c, 0xee, 0xf8, 0x4e, 0x86, 0xbb, 0xd3, 0x6d, 0xb7, 0xb1, 0xcb, 0xbc, 0xa6, - 0xa8, 0x41, 0xf0, 0xee, 0x8a, 0x2b, 0xb5, 0x1c, 0x51, 0x41, 0xe3, 0xa0, 0xb7, 0x49, 0x9e, 0xaf, - 0x02, 0x4d, 0x6f, 0xc8, 0xfb, 0x74, 0x8d, 0xfb, 0x64, 0x6d, 0x39, 0xa3, 0xe2, 0xa8, 0x38, 0x0d, - 0x04, 0xa2, 0xa8, 0x10, 0x6c, 0x61, 0xdd, 0x3e, 0xb7, 0xc3, 0x0c, 0x89, 0xa1, 0xbc, 0x19, 0x08, - 0x1c, 0xf9, 0x1e, 0x49, 0xe4, 0xd9, 0xae, 0x02, 0x16, 0xb1, 0x49, 0x84, 0x63, 0x76, 0x2f, 0x58, - 0xb6, 0x3d, 0xac, 0x2d, 0xb5, 0xcc, 0x28, 0x1c, 0x01, 0x8d, 0x8b, 0xee, 0xf0, 0xf8, 0x2a, 0x51, - 0xd1, 0x35, 0x8f, 0x66, 0x76, 0xcd, 0x2a, 0x26, 0x6b, 0x58, 0x90, 0x40, 0xa6, 0x77, 0xc9, 0xa8, - 0x2f, 0xff, 0xf3, 0xb3, 0xd9, 0xa9, 0x82, 0xd1, 0x15, 0x1c, 0x95, 0xee, 0x6a, 0xc8, 0x87, 0x50, - 0x80, 0xf9, 0x9f, 0x30, 0x47, 0x93, 0x01, 0x20, 0x99, 0x6b, 0x1a, 0xc7, 0x9b, 0x6b, 0xa6, 0xef, - 0x64, 0xee, 0x71, 0xdd, 0xc9, 0x0f, 0xa3, 0x3b, 0xa9, 0xd2, 0xdb, 0x05, 0x32, 0xd2, 0xdd, 0xb1, - 0xfc, 0xf0, 0x52, 0x3e, 0x1d, 0x66, 0x81, 0xeb, 0x82, 0xc8, 0x6f, 0x25, 0x51, 0xb1, 0x52, 0x3c, - 0x81, 0xe2, 0x94, 0x39, 0x9f, 0xc5, 0xcf, 0xb2, 0xdd, 0x66, 0xdb, 0x98, 0xc5, 0xc5, 0x39, 0x5f, - 0x38, 0x00, 0x31, 0x0f, 0x7d, 0x8d, 0x14, 0x3d, 0x66, 0xf9, 0xdc, 0xe5, 0xa9, 0x9b, 0x75, 0x3a, - 0xb4, 0x4c, 0x90, 0xd4, 0xcf, 0x85, 0x45, 0xa8, 0x8a, 0x4c, 0x3e, 0x03, 0x72, 0xd3, 0xaf, 0x90, - 0xd1, 0xce, 0xe1, 0x5d, 0x98, 0x70, 0x9c, 0xd7, 0x8a, 0x93, 0x3c, 0x8d, 0xf2, 0x82, 0x28, 0xb7, - 0xca, 0x92, 0xcf, 0x51, 0xd1, 0xc6, 0xd8, 0x48, 0xc0, 0x40, 0x0a, 0x96, 0x9f, 0xdb, 0x2c, 0x3f, - 0x9c, 0x6e, 0x9b, 0x89, 0xfc, 0x37, 0x96, 0x56, 0x1c, 0x5e, 0xda, 0x49, 0x2e, 0x6d, 0xb6, 0xde, - 0x8f, 0x05, 0x83, 0x04, 0xd0, 0x8b, 0xa4, 0xb4, 0xdd, 0xf3, 0x2c, 0x41, 0xc4, 0xe4, 0xf0, 0xf9, - 0x30, 0x1f, 0x5e, 0x46, 0x3a, 0xd7, 0xe3, 0x84, 0xc8, 0x27, 0x2b, 0x21, 0x01, 0xa2, 0x29, 0x3c, - 0xb3, 0x9a, 0x77, 0x65, 0xaa, 0xa6, 0x1c, 0x9a, 0x8a, 0xa9, 0xe1, 0xa5, 0xc4, 0x4e, 0x8e, 0x89, - 0x80, 0xf3, 0xb7, 0x0e, 0xe4, 0x84, 0x43, 0x50, 0xe8, 0x37, 0x48, 0xb1, 0x21, 0x2b, 0x27, 0x99, - 0x63, 0x0e, 0x1d, 0x92, 0x89, 0xea, 0xcb, 0x09, 0x00, 0x40, 0x20, 0xf3, 0xdf, 0x05, 0x32, 0x81, - 0xd6, 0x2a, 0xda, 0x8f, 0xad, 0x7d, 0x5e, 0x8e, 0xe9, 0x31, 0xe4, 0xf9, 0x54, 0x0c, 0x99, 0x49, - 0x30, 0x6b, 0x51, 0xe4, 0x07, 0x64, 0x52, 0xb9, 0xef, 0x70, 0x0c, 0xa3, 0xc9, 0xd2, 0x70, 0x37, - 0x4e, 0xed, 0x3b, 0x21, 0x44, 0x59, 0xcd, 0x72, 0x02, 0x1c, 0x52, 0xc2, 0x84, 0x78, 0xf4, 0x72, - 0xa1, 0xf8, 0x7c, 0x16, 0xf1, 0xe8, 0xd1, 0xfa, 0xc5, 0x6f, 0x24, 0xc0, 0x21, 0x25, 0x4c, 0x88, - 0x6f, 0xf4, 0xfc, 0xc0, 0xed, 0x44, 0xe2, 0x0b, 0x59, 0xc4, 0xd7, 0x25, 0xc6, 0x00, 0xf1, 0xf5, - 0x04, 0x38, 0xa4, 0x84, 0xd1, 0x0f, 0x0c, 0x72, 0xf2, 0x2e, 0x73, 0x76, 0x6d, 0xc7, 0x5f, 0xb7, - 0xbb, 0xac, 0xcd, 0x2b, 0x98, 0x68, 0x21, 0xea, 0x9a, 0xae, 0x0e, 0xb7, 0x90, 0xd5, 0x24, 0x58, - 0x72, 0x45, 0x4f, 0xf3, 0x15, 0x9d, 0x5c, 0x1d, 0x2c, 0x0e, 0x0e, 0x5a, 0x87, 0xf9, 0xb7, 0x3c, - 0x16, 0xff, 0xba, 0x3f, 0xd5, 0x3d, 0x90, 0xf1, 0x05, 0x1e, 0x88, 0xeb, 0x58, 0x76, 0xc9, 0xed, - 0xc6, 0x6d, 0xb6, 0x75, 0xdd, 0x75, 0x77, 0xb3, 0x59, 0xd8, 0xb5, 0x04, 0x86, 0xf2, 0xea, 0x52, - 0xc7, 0xc9, 0x01, 0x48, 0x09, 0xa3, 0xfb, 0x64, 0x42, 0xc9, 0x09, 0xa5, 0x2b, 0x03, 0xbb, 0x3c, - 0x74, 0x6e, 0x72, 0x3d, 0x82, 0x50, 0xc2, 0x67, 0x44, 0xa7, 0x38, 0x41, 0x87, 0xa4, 0x24, 0xfa, - 
0x9e, 0x41, 0xa6, 0x65, 0x6e, 0x51, 0xdf, 0xb1, 0x9c, 0x96, 0x3a, 0x0d, 0x34, 0xb0, 0x4b, 0x19, - 0xd2, 0x17, 0x85, 0xa2, 0x84, 0xcb, 0x5a, 0x60, 0x25, 0x85, 0x0d, 0x7d, 0xd2, 0xcc, 0x5f, 0xe4, - 0x09, 0xed, 0xef, 0x4e, 0xd1, 0x57, 0x13, 0xce, 0xe2, 0x4c, 0xca, 0x59, 0x4c, 0xeb, 0x33, 0x34, - 0x5f, 0xd1, 0x22, 0x45, 0xb5, 0xea, 0x6c, 0xf5, 0x12, 0xaa, 0x05, 0x71, 0x07, 0xe9, 0x0f, 0xe1, - 0x45, 0xae, 0x83, 0xa7, 0x88, 0xa7, 0x75, 0x34, 0x49, 0x83, 0xcc, 0x24, 0x14, 0x40, 0x7d, 0x52, - 0xd6, 0xb4, 0x86, 0xc7, 0x73, 0x39, 0xf3, 0xf1, 0x84, 0x32, 0x65, 0xf5, 0xa2, 0xd1, 0x41, 0x97, - 0x62, 0xfe, 0xba, 0x48, 0xb4, 0xfc, 0x87, 0x5e, 0xe2, 0x5e, 0x90, 0x79, 0x7b, 0x76, 0x83, 0x2d, - 0x35, 0x1a, 0x6e, 0xcf, 0x09, 0xf0, 0x60, 0xa2, 0x57, 0x08, 0x1b, 0x89, 0x51, 0x48, 0x71, 0xcb, - 0xf6, 0xb9, 0x74, 0x6c, 0x78, 0x30, 0x99, 0xda, 0xe7, 0xa9, 0xe4, 0x18, 0xab, 0x5b, 0x04, 0x4e, - 0x54, 0xcb, 0xf9, 0x63, 0xac, 0x96, 0x6d, 0x52, 0xf2, 0x93, 0xbe, 0xf8, 0x8d, 0x4c, 0xef, 0x02, - 0xd0, 0xe7, 0x45, 0xbd, 0xb1, 0xc8, 0xd1, 0x45, 0xf0, 0x42, 0x6b, 0x2a, 0x68, 0xa3, 0xaf, 0xcd, - 0xa2, 0x35, 0x95, 0x11, 0xc4, 0x5a, 0x53, 0xcf, 0x80, 0xc0, 0xbc, 0x46, 0x1b, 0xf3, 0x98, 0xd2, - 0xa0, 0x8f, 0xa9, 0xd0, 0x17, 0xd4, 0x06, 0x80, 0xec, 0xa2, 0xff, 0x61, 0x7b, 0xac, 0xc3, 0x9c, - 0xc0, 0x8f, 0xb3, 0xc8, 0x70, 0xd4, 0x87, 0x18, 0x97, 0xf6, 0x08, 0xe9, 0x46, 0x2d, 0x1b, 0xac, - 0x40, 0x96, 0x32, 0xec, 0x25, 0xd9, 0xf7, 0x89, 0x13, 0xf5, 0x98, 0x0e, 0x9a, 0x20, 0xfa, 0x2d, - 0x72, 0x2a, 0xce, 0xc7, 0x96, 0x99, 0xb5, 0x2d, 0xc3, 0x06, 0xf6, 0x19, 0x55, 0xe3, 0xed, 0x59, - 0x3e, 0xfd, 0x54, 0xfd, 0x20, 0x26, 0x38, 0x78, 0xbe, 0xf9, 0xd7, 0x02, 0x99, 0x1d, 0x10, 0x55, - 0xe9, 0x2d, 0xec, 0x6d, 0x64, 0xea, 0xa8, 0x45, 0xef, 0x52, 0xb4, 0xfe, 0x86, 0xec, 0xac, 0xb5, - 0xdb, 0x8f, 0xaa, 0xb3, 0x16, 0x22, 0x81, 0x86, 0x1a, 0xf6, 0x2a, 0xf2, 0x47, 0xea, 0x55, 0xac, - 0x12, 0xca, 0xee, 0x71, 0xf5, 0x33, 0xcc, 0xa8, 0xc4, 0x5f, 0x55, 0x68, 0x97, 0x6a, 0xf3, 0xc8, - 0x4d, 0xaf, 0xf4, 0x71, 0xc0, 0x80, 0x59, 0xa2, 0x50, 0x69, 0xba, 0xdc, 0x76, 0xc4, 0x7a, 0xa5, - 0xf1, 0x6b, 0x85, 0xca, 0xd5, 0x70, 0x00, 0x62, 0x1e, 0x6e, 0xc7, 0x51, 0xf1, 0x59, 0x94, 0xbb, - 0xb8, 0x90, 0xa5, 0xf8, 0x94, 0x66, 0x75, 0x60, 0xd5, 0x49, 0x97, 0xc8, 0x94, 0x9c, 0xb4, 0xb4, - 0xbe, 0x12, 0x76, 0x82, 0xd4, 0x7b, 0xd9, 0x93, 0x38, 0x45, 0x35, 0x42, 0xe2, 0x61, 0x48, 0xf3, - 0x9b, 0x7f, 0xc8, 0x93, 0xd9, 0x01, 0xa9, 0x68, 0xd4, 0x12, 0x33, 0x1e, 0x45, 0x4b, 0xec, 0x71, - 0x98, 0x0c, 0xcf, 0xaf, 0x1c, 0xb7, 0x6e, 0x35, 0x76, 0x18, 0x36, 0xf9, 0x23, 0xb5, 0xdd, 0x54, - 0x64, 0x08, 0xc7, 0x43, 0xeb, 0x2a, 0x1c, 0xc9, 0xba, 0x86, 0xb6, 0x88, 0x4b, 0x61, 0xdd, 0x20, - 0xda, 0x3e, 0xeb, 0x56, 0xb0, 0x83, 0x0d, 0x93, 0x28, 0x64, 0x2d, 0x27, 0x46, 0x21, 0xc5, 0x6d, - 0xfe, 0xce, 0x20, 0xb3, 0x03, 0x52, 0xba, 0x44, 0x9c, 0x31, 0x8e, 0x31, 0xce, 0x88, 0x86, 0x74, - 0x7c, 0x80, 0x7a, 0x43, 0x5a, 0x1d, 0x06, 0x8e, 0x9a, 0x9f, 0xf4, 0xad, 0xf3, 0xca, 0x1e, 0xf7, - 0xc9, 0xd9, 0x5a, 0x76, 0xeb, 0xaa, 0x3b, 0xa6, 0x4c, 0xe6, 0xfc, 0xd0, 0x19, 0xe8, 0x8a, 0xd3, - 0x74, 0x53, 0x6d, 0xb1, 0x47, 0xe1, 0x5a, 0xcc, 0x0f, 0x0d, 0x32, 0x99, 0x6c, 0xbe, 0xd1, 0x67, - 0x49, 0xbe, 0xe7, 0xd9, 0xb8, 0xbb, 0x68, 0xc6, 0x5b, 0xb0, 0x02, 0x82, 0x2e, 0x86, 0x3d, 0xd6, - 0x44, 0xd5, 0x45, 0xc3, 0xdc, 0xb4, 0x41, 0xd0, 0xc5, 0x1b, 0x9b, 0x9d, 0x20, 0xe8, 0xae, 0x7b, - 0xee, 0xbd, 0x7d, 0x6c, 0x6d, 0xc8, 0x37, 0x36, 0xd7, 0x37, 0x37, 0xd7, 0x25, 0x11, 0xe2, 0x71, - 0xd1, 0x62, 0x14, 0x0f, 0xbe, 0xe2, 0x2e, 0xc4, 0x2d, 0x46, 0xc1, 0xbd, 0xa1, 0xd8, 0x35, 0x0e, - 0xf3, 0xb7, 0x06, 0xa1, 
0xfd, 0xe9, 0xf8, 0xff, 0x9c, 0xe1, 0xfc, 0x2c, 0x47, 0x46, 0xf1, 0xcc, - 0xe8, 0xf7, 0x79, 0x09, 0x94, 0xd0, 0x6f, 0xb6, 0x15, 0xa6, 0x1a, 0xa4, 0xd1, 0x55, 0x4b, 0xd2, - 0x21, 0x25, 0x8b, 0xbe, 0x6f, 0x90, 0x19, 0x4e, 0x4a, 0xee, 0x2f, 0x5b, 0xd3, 0xf8, 0x5a, 0x1a, - 0xa6, 0x76, 0x0a, 0x17, 0x31, 0xd3, 0x37, 0x04, 0xfd, 0x42, 0xcd, 0xbf, 0xe7, 0x48, 0x3f, 0xa3, - 0x50, 0x69, 0x43, 0x25, 0x2f, 0xc6, 0xc0, 0x8f, 0x91, 0x70, 0x54, 0xd4, 0x1f, 0x96, 0xfc, 0x9a, - 0x27, 0xdb, 0xe2, 0x95, 0x54, 0xd1, 0xc8, 0xf5, 0xdc, 0xf6, 0x5b, 0x3c, 0x85, 0xd6, 0xbe, 0xa6, - 0x91, 0xb0, 0x80, 0xf0, 0xb4, 0x2b, 0x5e, 0xde, 0xe2, 0xc7, 0x39, 0xd9, 0xbe, 0xb1, 0xe8, 0x97, - 0xa5, 0xbd, 0xfd, 0x45, 0x64, 0x88, 0x85, 0x0c, 0xd1, 0xd1, 0x33, 0x7f, 0xce, 0xab, 0xca, 0x74, - 0x3d, 0x28, 0xe6, 0xcb, 0xfa, 0x62, 0x65, 0x39, 0x5d, 0x8f, 0xaf, 0x28, 0x32, 0x84, 0xe3, 0x74, - 0x93, 0x8c, 0x8a, 0x30, 0x06, 0x78, 0x7f, 0x87, 0x0e, 0x87, 0xf2, 0xf5, 0xdf, 0x55, 0x85, 0x00, - 0x21, 0x94, 0xf9, 0x17, 0x7e, 0x2b, 0xfb, 0xcb, 0x20, 0xee, 0xf1, 0x4e, 0x88, 0xb7, 0x36, 0x51, - 0xd7, 0x75, 0x25, 0xb1, 0xc8, 0x67, 0x70, 0x91, 0x27, 0xd6, 0x06, 0xf0, 0xc0, 0xc0, 0x99, 0x51, - 0x28, 0xcf, 0x3d, 0x82, 0x50, 0x6e, 0xfe, 0x3e, 0x47, 0xca, 0xda, 0xab, 0x81, 0xe3, 0x48, 0x2f, - 0x47, 0xba, 0x3c, 0xdc, 0x85, 0x1f, 0x20, 0x5c, 0xcc, 0xfc, 0xd6, 0x42, 0x04, 0xcd, 0xf8, 0x53, - 0x04, 0xf1, 0xe4, 0x83, 0x82, 0x4e, 0xe5, 0x23, 0xf9, 0xe3, 0xc8, 0x47, 0xcc, 0x1f, 0x1b, 0x64, - 0x2a, 0xb5, 0x1a, 0xf1, 0xbe, 0xc4, 0x8f, 0x9e, 0xf0, 0x44, 0xa3, 0xa2, 0x21, 0xe6, 0x03, 0x8d, - 0x4b, 0xa6, 0x0d, 0xcc, 0x0f, 0x6c, 0x47, 0x76, 0x5f, 0xc5, 0x7b, 0x96, 0x5c, 0x2a, 0x6d, 0x48, - 0x8c, 0x42, 0x8a, 0xdb, 0xfc, 0xa5, 0x41, 0x9e, 0x39, 0xac, 0xc9, 0x25, 0x92, 0x48, 0xec, 0x64, - 0x45, 0x89, 0x89, 0x91, 0x4c, 0x22, 0x57, 0x93, 0xc3, 0x90, 0xe6, 0x17, 0x1f, 0xb6, 0x68, 0x24, - 0x5c, 0x60, 0xf4, 0x02, 0x41, 0x9b, 0x0e, 0x3a, 0x9f, 0xf9, 0x47, 0xee, 0x66, 0xfb, 0x5e, 0xb2, - 0xd0, 0x3b, 0x51, 0xb8, 0x30, 0xb2, 0x1f, 0xcc, 0xe0, 0x10, 0x73, 0x64, 0x65, 0xf2, 0xb8, 0x4f, - 0xe2, 0xcc, 0x9c, 0xb6, 0xc9, 0xb8, 0x02, 0x4e, 0xc4, 0xa8, 0x2c, 0x0b, 0x3e, 0x81, 0x0b, 0x18, - 0xdf, 0xd0, 0xf0, 0x20, 0x81, 0x2e, 0x32, 0xce, 0x8e, 0x68, 0x5e, 0xc8, 0x23, 0xca, 0x25, 0x3f, - 0x90, 0xb9, 0x11, 0x0e, 0x40, 0xcc, 0x63, 0xfe, 0x64, 0x84, 0xcc, 0x0e, 0xe8, 0xf3, 0xfe, 0x1f, - 0x97, 0x84, 0xdc, 0xe9, 0xab, 0xcf, 0x26, 0xfc, 0x74, 0xd0, 0x50, 0x5f, 0x55, 0x88, 0xda, 0x4a, - 0xfd, 0x23, 0xde, 0xb0, 0xdb, 0x4e, 0x43, 0xf5, 0x13, 0xac, 0x30, 0xc3, 0x57, 0x3d, 0xaa, 0x98, - 0x0c, 0x3a, 0x4f, 0xb2, 0x24, 0x28, 0x3e, 0x54, 0x91, 0x38, 0x8e, 0x5f, 0x52, 0xab, 0x8f, 0x1c, - 0x46, 0xb3, 0x1c, 0x88, 0x7c, 0x0d, 0x0a, 0x1a, 0x0c, 0x24, 0x40, 0xe9, 0x8f, 0x78, 0xf4, 0x43, - 0xc2, 0x92, 0x17, 0xd8, 0x4d, 0xab, 0x11, 0xbd, 0x10, 0x3d, 0xa2, 0x73, 0x9d, 0xc3, 0xcd, 0x4d, - 0x43, 0x0a, 0x1e, 0xfa, 0x04, 0x9a, 0x77, 0xf8, 0x55, 0x4f, 0x47, 0x78, 0x7a, 0x86, 0x14, 0x1c, - 0xf1, 0x31, 0xb1, 0xf2, 0x37, 0x91, 0x65, 0xc9, 0x6f, 0x88, 0xe5, 0x08, 0x7d, 0x81, 0x8c, 0x30, - 0xf9, 0x85, 0xb0, 0xb2, 0xf7, 0xc8, 0x9d, 0xab, 0x0f, 0x83, 0xd5, 0x98, 0xf9, 0x01, 0xcf, 0xc6, - 0x53, 0x09, 0x52, 0xa6, 0x62, 0xe3, 0x8e, 0x5e, 0x6c, 0x1c, 0x39, 0xcf, 0x4b, 0x94, 0x1d, 0x66, - 0x93, 0x4c, 0x26, 0x7b, 0xac, 0x5a, 0x5a, 0x6c, 0x1c, 0x96, 0x16, 0x8b, 0xcf, 0xd7, 0x2c, 0xf1, - 0x1d, 0x1b, 0x37, 0x62, 0x7c, 0x45, 0x1a, 0xb5, 0xe8, 0x96, 0x90, 0x0e, 0x11, 0x47, 0xed, 0xc5, - 0xfb, 0x9f, 0x9c, 0x7e, 0xe2, 0x01, 0xff, 0xfd, 0x93, 0xff, 0xde, 0xfb, 0xf4, 0xb4, 0x71, 0x9f, - 0xff, 0x1e, 0xf0, 0xdf, 0xbf, 0xf8, 0xef, 0xa7, 
0x9f, 0x9d, 0x7e, 0xe2, 0x4e, 0x6e, 0x6f, 0xe1, - 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xf1, 0x46, 0xe8, 0xd7, 0x30, 0x00, 0x00, + // 3248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0x4b, 0x6c, 0x1b, 0xc7, + 0xf9, 0xf7, 0x92, 0x14, 0x45, 0x0d, 0x65, 0x49, 0x1e, 0x39, 0xb1, 0xac, 0x24, 0xb6, 0xb3, 0xc9, + 0xff, 0x8f, 0x14, 0x49, 0x28, 0xd8, 0x79, 0xd4, 0x79, 0x36, 0x22, 0x65, 0x27, 0x52, 0x65, 0x5b, + 0xfd, 0xa4, 0x3c, 0xea, 0xa2, 0x2d, 0x56, 0xe4, 0x90, 0xda, 0x88, 0xdc, 0x65, 0x77, 0x97, 0x8a, + 0x05, 0xb4, 0x40, 0xd0, 0xa2, 0x40, 0x7a, 0xeb, 0xeb, 0x90, 0x4b, 0xd1, 0x06, 0xe8, 0x03, 0x45, + 0x0e, 0x45, 0x1f, 0x87, 0x02, 0xbd, 0xb4, 0x40, 0x0f, 0x39, 0x15, 0x39, 0xf6, 0x50, 0x04, 0x4d, + 0x72, 0xe8, 0xb9, 0xd7, 0x9c, 0x3a, 0x8f, 0x6f, 0x76, 0x67, 0x97, 0x94, 0x62, 0xae, 0xac, 0xa0, + 0x40, 0x0f, 0x34, 0xb4, 0xdf, 0x7c, 0xf3, 0xfb, 0x66, 0xbf, 0xf9, 0xe6, 0x7b, 0xed, 0x98, 0x3c, + 0xdb, 0x71, 0xa3, 0x9d, 0xc1, 0x76, 0xad, 0xe9, 0xf7, 0x96, 0xfc, 0x3e, 0xf3, 0xc2, 0x1d, 0xb7, + 0x1d, 0x2d, 0xf9, 0x81, 0xdb, 0x71, 0xbd, 0xa5, 0xfe, 0x6e, 0x67, 0x69, 0x7b, 0xe0, 0x76, 0x5b, + 0x4b, 0x4e, 0xdf, 0x5d, 0xda, 0xbb, 0xb8, 0xd4, 0x61, 0x1e, 0x0b, 0x9c, 0x88, 0xb5, 0x6a, 0xfd, + 0xc0, 0x8f, 0x7c, 0xfa, 0x48, 0x32, 0xbb, 0x16, 0xcf, 0xae, 0xa9, 0xd9, 0x35, 0x3e, 0xbb, 0x26, + 0x67, 0xd7, 0xf8, 0xec, 0xda, 0xde, 0xc5, 0xc5, 0x47, 0x0d, 0x59, 0x1d, 0xbf, 0xe3, 0x2f, 0x49, + 0x90, 0xed, 0x41, 0x5b, 0x3e, 0xc9, 0x07, 0xf9, 0x97, 0x02, 0x5f, 0x7c, 0x62, 0xf7, 0x72, 0x58, + 0x73, 0xfd, 0xa5, 0xdd, 0xc1, 0x36, 0x0b, 0x3c, 0x16, 0xb1, 0x50, 0x2e, 0x48, 0x2c, 0x65, 0xe0, + 0xed, 0xb1, 0x20, 0x74, 0x7d, 0x8f, 0xb5, 0xb2, 0x6b, 0x5a, 0x7c, 0xe4, 0xe0, 0x69, 0xc3, 0x6f, + 0xb0, 0xf8, 0xe8, 0x68, 0xee, 0x60, 0xe0, 0x45, 0x6e, 0x8f, 0x0d, 0xb1, 0x5f, 0x1c, 0xcd, 0x3e, + 0x88, 0xdc, 0xee, 0x92, 0xeb, 0x45, 0x61, 0x14, 0x64, 0xa7, 0xd8, 0x7f, 0x2c, 0x91, 0xb3, 0x75, + 0xd7, 0x73, 0x82, 0xfd, 0xba, 0x50, 0x06, 0xb0, 0x6f, 0x0c, 0x58, 0x18, 0xdd, 0xe8, 0x47, 0x7c, + 0xf9, 0x21, 0x7d, 0x8d, 0x54, 0x7a, 0x2c, 0x72, 0x5a, 0x4e, 0xe4, 0x2c, 0x58, 0x17, 0xac, 0x87, + 0xaa, 0x97, 0x1e, 0xaa, 0x29, 0x19, 0xb5, 0x44, 0x86, 0x54, 0xa5, 0x52, 0x62, 0xed, 0xc6, 0xf6, + 0xeb, 0xac, 0x19, 0x5d, 0xe3, 0x73, 0xea, 0xf4, 0xbd, 0x0f, 0xce, 0x9f, 0xf8, 0xe8, 0x83, 0xf3, + 0x24, 0xa1, 0x41, 0x8c, 0x46, 0xff, 0x9f, 0x94, 0x9d, 0xf0, 0xaa, 0xdb, 0x65, 0x0b, 0x05, 0x8e, + 0x3b, 0x55, 0x9f, 0x41, 0xee, 0xf2, 0xb2, 0xa4, 0x02, 0x8e, 0xd2, 0x27, 0xc9, 0x4c, 0xc0, 0xf6, + 0x5c, 0xa1, 0xcd, 0x86, 0xdf, 0xeb, 0xb9, 0xd1, 0x42, 0x31, 0xcd, 0xaf, 0xa8, 0x90, 0xe1, 0xa2, + 0x4f, 0x91, 0x59, 0x4d, 0xb9, 0xc6, 0xc2, 0xd0, 0xe9, 0xb0, 0x85, 0x92, 0x9c, 0x38, 0x8b, 0x13, + 0x27, 0x91, 0x0c, 0x59, 0x3e, 0x5a, 0x27, 0x54, 0x93, 0x96, 0x07, 0xd1, 0x8e, 0x1f, 0x5c, 0x77, + 0x7a, 0x6c, 0x61, 0x42, 0xce, 0x8e, 0x5f, 0x2a, 0x19, 0x81, 0x11, 0xdc, 0xf4, 0x0a, 0x99, 0x4f, + 0x53, 0xaf, 0xf4, 0x1c, 0xb7, 0xbb, 0x50, 0x96, 0x20, 0xf3, 0x08, 0x52, 0x35, 0x86, 0x60, 0x14, + 0x3f, 0xfd, 0x22, 0xb9, 0x2b, 0xfd, 0x5e, 0x11, 0x53, 0xab, 0x99, 0x94, 0x40, 0x77, 0x21, 0xd0, + 0xc9, 0xd4, 0x20, 0x8c, 0x9e, 0x43, 0xaf, 0x93, 0xbb, 0x87, 0x06, 0xd4, 0xb2, 0x2a, 0x12, 0xed, + 0x6e, 0x44, 0x9b, 0x49, 0x8f, 0xc2, 0x01, 0xb3, 0xec, 0x67, 0xc8, 0x29, 0xc3, 0x72, 0x36, 0xfd, + 0x41, 0xd0, 0x64, 0xc6, 0xbe, 0x5a, 0x87, 0xed, 0xab, 0xfd, 0xd3, 0x02, 0x99, 0x90, 0xf3, 0x8e, + 0xd1, 0xc6, 0xbe, 0x4c, 0x4a, 0x61, 0x9f, 0x35, 0xa5, 0x85, 0x55, 0x2f, 0x7d, 0xbe, 0x36, 0x8e, + 0x3b, 0xa8, 0xa9, 0x97, 0xe2, 0xd3, 
0xeb, 0xd3, 0x28, 0xa4, 0x24, 0x9e, 0x40, 0x42, 0x52, 0x87, + 0x94, 0xc3, 0xc8, 0x89, 0x06, 0xa1, 0x34, 0xc7, 0xea, 0xa5, 0xa7, 0xf2, 0x80, 0x4b, 0x80, 0x44, + 0x43, 0xea, 0x19, 0x10, 0xd8, 0xfe, 0x6d, 0x81, 0x54, 0x25, 0x5f, 0xc3, 0xf7, 0xda, 0x6e, 0xe7, + 0x18, 0xf5, 0xf4, 0xf5, 0x94, 0x9e, 0x9e, 0xcb, 0xf1, 0x2a, 0x6a, 0x89, 0x07, 0x6a, 0xab, 0x93, + 0xd1, 0xd6, 0x17, 0xf2, 0x8b, 0x38, 0x5c, 0x67, 0xef, 0x5b, 0x64, 0xd6, 0xe0, 0x5e, 0x77, 0xc3, + 0x88, 0x7e, 0x75, 0x48, 0x6f, 0x4b, 0x87, 0xe8, 0xcd, 0xf0, 0xdd, 0x35, 0x31, 0x5d, 0xaa, 0x6f, + 0x0e, 0xc5, 0x55, 0x34, 0xc5, 0x50, 0xde, 0xd7, 0xc8, 0x84, 0x1b, 0xb1, 0x5e, 0xc8, 0xb5, 0x57, + 0xcc, 0x69, 0x08, 0x6a, 0xb1, 0xf5, 0x93, 0x28, 0x65, 0x62, 0x55, 0xe0, 0x81, 0x82, 0xb5, 0x7f, + 0x57, 0x48, 0xbd, 0x92, 0xd0, 0x2a, 0xf5, 0x48, 0x25, 0xe2, 0x80, 0x1d, 0xbe, 0x52, 0xfe, 0x4a, + 0x42, 0xec, 0x0b, 0x39, 0xc4, 0x6e, 0x29, 0x88, 0x0d, 0xbf, 0xeb, 0x36, 0xf7, 0x93, 0x77, 0x44, + 0x72, 0x08, 0xb1, 0x0c, 0xba, 0x4c, 0xa6, 0x78, 0xc8, 0x51, 0x8c, 0xe8, 0xaf, 0x1f, 0x40, 0xf6, + 0x29, 0xd0, 0x03, 0x9f, 0x70, 0xcf, 0xa1, 0x62, 0x88, 0xa6, 0x40, 0x32, 0x8b, 0x76, 0x09, 0xe1, + 0x4b, 0xeb, 0xf9, 0x9e, 0x78, 0x01, 0x34, 0x83, 0xcb, 0xe3, 0x2d, 0xba, 0x11, 0xcf, 0x4f, 0xec, + 0x39, 0xa1, 0x81, 0x81, 0x6f, 0xaf, 0x71, 0xd7, 0x94, 0x35, 0x1a, 0xfa, 0x04, 0xa9, 0x76, 0x9d, + 0x30, 0x7a, 0x45, 0xed, 0xaf, 0xb4, 0x85, 0x62, 0xe2, 0x8b, 0xd7, 0x93, 0x21, 0x30, 0xf9, 0xec, + 0xbf, 0x5a, 0x64, 0x4a, 0x82, 0x7d, 0x16, 0xd6, 0xf4, 0x5a, 0xda, 0x9a, 0x1e, 0xcb, 0xb1, 0xad, + 0x07, 0xd8, 0x11, 0x21, 0x15, 0xf5, 0x16, 0x7e, 0xc7, 0x7e, 0xab, 0x84, 0x36, 0xc5, 0x1f, 0x74, + 0xa8, 0x5f, 0x22, 0x53, 0x4d, 0xdf, 0x8b, 0x1c, 0x97, 0xe7, 0x07, 0xe8, 0xbb, 0x4f, 0xe9, 0x3d, + 0x6e, 0xe8, 0x01, 0x48, 0x78, 0x84, 0xa7, 0x6f, 0xfb, 0xdd, 0xae, 0xff, 0x86, 0xb4, 0x88, 0x4a, + 0x72, 0x26, 0xaf, 0x4a, 0x2a, 0xe0, 0x28, 0x7d, 0x84, 0x54, 0xfa, 0x22, 0x82, 0xf8, 0x78, 0xfc, + 0x2b, 0x89, 0x02, 0x36, 0x90, 0x0e, 0x31, 0x07, 0x7d, 0x9c, 0x4c, 0x87, 0xae, 0xd7, 0x64, 0x9b, + 0x8c, 0x4b, 0x6a, 0x85, 0x32, 0x68, 0x17, 0xeb, 0x73, 0x9c, 0x7b, 0x7a, 0xd3, 0xa0, 0x43, 0x8a, + 0x8b, 0xab, 0x6d, 0x4a, 0x3e, 0x6f, 0xb9, 0x18, 0xa9, 0xab, 0x97, 0x1e, 0xbe, 0xcd, 0x6d, 0x11, + 0x53, 0xea, 0x27, 0xc5, 0x5b, 0x6e, 0x6a, 0x04, 0x48, 0xc0, 0xe8, 0x25, 0x42, 0x44, 0xaa, 0xc5, + 0xfd, 0x4b, 0xaf, 0x1f, 0xca, 0xf8, 0x5d, 0x49, 0xac, 0x6f, 0x2b, 0x1e, 0x01, 0x83, 0x8b, 0x3e, + 0x4c, 0xa6, 0xb8, 0x8e, 0xba, 0xeb, 0x5c, 0x4d, 0xa1, 0x8c, 0xd4, 0x45, 0x25, 0x60, 0x4b, 0x13, + 0x21, 0x19, 0xa7, 0x35, 0x42, 0xba, 0x2e, 0x0f, 0xab, 0xf5, 0x7d, 0xbe, 0x42, 0x19, 0x89, 0x8b, + 0xf5, 0x19, 0x01, 0xbe, 0x1e, 0x53, 0xc1, 0xe0, 0x10, 0x6a, 0xf7, 0xfc, 0x37, 0x1c, 0x9e, 0x08, + 0x4d, 0xa5, 0xd5, 0x7e, 0xdd, 0x7f, 0x95, 0x53, 0x01, 0x47, 0xe9, 0xff, 0x91, 0x49, 0x7c, 0xc9, + 0x05, 0x22, 0x41, 0xab, 0x22, 0xe9, 0xd1, 0x16, 0xae, 0xc7, 0xec, 0x5f, 0xeb, 0x28, 0x73, 0x63, + 0x10, 0xf5, 0x07, 0x11, 0x4f, 0x5c, 0x0a, 0x91, 0x8f, 0x96, 0xfd, 0xe8, 0xed, 0xc4, 0x17, 0x60, + 0x6d, 0x16, 0x30, 0xae, 0xae, 0x7a, 0x99, 0x0b, 0x28, 0x6c, 0xf9, 0xc0, 0x01, 0xe8, 0x36, 0x21, + 0xfd, 0x41, 0xb8, 0xc3, 0xf7, 0x27, 0x60, 0x11, 0x06, 0x96, 0x4b, 0x87, 0xc3, 0xad, 0xfb, 0x4d, + 0xa7, 0x9b, 0xc5, 0x94, 0x9a, 0xd8, 0x88, 0x91, 0xc0, 0x40, 0xa5, 0x3e, 0xa9, 0xba, 0x3d, 0x9e, + 0xb0, 0xad, 0x3b, 0xdb, 0xac, 0x2b, 0x6c, 0xab, 0x38, 0xbe, 0x4f, 0x59, 0x8d, 0x01, 0x12, 0x4f, + 0x90, 0xd0, 0x42, 0x30, 0x25, 0xd8, 0xdf, 0xb6, 0xc8, 0xbc, 0xd4, 0xd5, 0x86, 0x1f, 0x46, 0x2a, + 0x19, 0x92, 0xee, 0x98, 0xab, 0x5a, 0xf8, 0x1e, 0xc7, 0x6b, 
0x49, 0x6f, 0x3c, 0xa5, 0x54, 0xdd, + 0x50, 0x24, 0xd0, 0x63, 0xf4, 0x5e, 0x52, 0x72, 0x82, 0x8e, 0x3a, 0xda, 0x53, 0xf5, 0x8a, 0x88, + 0x91, 0xcb, 0xfc, 0x19, 0x24, 0x55, 0xec, 0x6b, 0xd8, 0x0c, 0xdc, 0xfe, 0x50, 0x82, 0xbb, 0x29, + 0xa9, 0x80, 0xa3, 0xf6, 0xc7, 0x13, 0x64, 0xda, 0x4c, 0xd5, 0x8f, 0x31, 0x2f, 0x68, 0x93, 0x8a, + 0x4e, 0xfd, 0x70, 0x0b, 0x9f, 0x1d, 0x4f, 0xbb, 0x2a, 0x27, 0x04, 0xc4, 0xa8, 0x4f, 0x8b, 0x33, + 0xaf, 0x9f, 0x20, 0xc6, 0xe6, 0x1b, 0x39, 0x87, 0xa1, 0x86, 0xb5, 0xea, 0xfb, 0x52, 0xfd, 0x18, + 0x21, 0xc6, 0xb4, 0xc0, 0xd3, 0x5c, 0xc0, 0xdc, 0x56, 0x06, 0x0a, 0x86, 0xc0, 0x79, 0x5a, 0x5d, + 0x6a, 0x07, 0x7e, 0x4f, 0x3a, 0x97, 0xb1, 0x85, 0xc8, 0x8d, 0xbb, 0xca, 0xa7, 0x83, 0x04, 0xa1, + 0x4d, 0x52, 0xde, 0x96, 0x69, 0x30, 0x3a, 0x9e, 0x71, 0x93, 0x9b, 0x6c, 0x0a, 0x5d, 0x27, 0x62, + 0xd7, 0x15, 0x19, 0x10, 0x9a, 0x5e, 0x4c, 0xc7, 0xae, 0xb2, 0x3c, 0xd1, 0xb3, 0x87, 0xc5, 0x2d, + 0xda, 0x20, 0x45, 0xe6, 0xed, 0x71, 0xff, 0x23, 0x8e, 0xc5, 0x83, 0x87, 0xbf, 0xe3, 0x15, 0x6f, + 0xef, 0x15, 0x27, 0xa8, 0x57, 0xd1, 0x1c, 0x8a, 0xfc, 0x19, 0xc4, 0x6c, 0xba, 0x47, 0xaa, 0x86, + 0xf6, 0xb8, 0x7b, 0x2a, 0xe6, 0x4c, 0xdf, 0x70, 0x57, 0x1a, 0xce, 0x20, 0x64, 0xc9, 0x51, 0x33, + 0xf6, 0x0a, 0x4c, 0x41, 0xf6, 0x4f, 0x26, 0xd0, 0x2d, 0x61, 0x59, 0xf1, 0x18, 0x29, 0x45, 0xfb, + 0x7d, 0x5d, 0x54, 0x9c, 0xd7, 0x39, 0xe6, 0x16, 0xa7, 0xf1, 0xbc, 0x63, 0xd6, 0x60, 0x15, 0x24, + 0x90, 0xcc, 0xc6, 0xce, 0x14, 0x8e, 0x6f, 0x67, 0xb8, 0xff, 0x6e, 0xf9, 0xcd, 0x5d, 0x16, 0xb4, + 0x45, 0xd1, 0x83, 0x67, 0x57, 0x1c, 0xa9, 0x95, 0x98, 0x0a, 0x06, 0x07, 0x7d, 0x95, 0x14, 0xf9, + 0x2a, 0xd0, 0xf4, 0xc6, 0x3c, 0x4f, 0x2f, 0xf2, 0x20, 0x60, 0x2c, 0x67, 0x52, 0x6c, 0x15, 0xa7, + 0x81, 0x40, 0x14, 0x25, 0x89, 0x74, 0x56, 0x21, 0xb7, 0xc3, 0x1c, 0x99, 0xa8, 0x3c, 0x19, 0x08, + 0x1c, 0xfb, 0x1e, 0x49, 0xe4, 0xe9, 0xb5, 0x02, 0x16, 0xc1, 0x50, 0xc4, 0x7f, 0x76, 0x2b, 0x5a, + 0x71, 0x03, 0x2c, 0x66, 0x8d, 0x54, 0x4c, 0x8f, 0x80, 0xc1, 0x45, 0x77, 0x78, 0x40, 0x97, 0xa8, + 0x18, 0x0b, 0x26, 0x73, 0xc7, 0x02, 0x95, 0x04, 0x18, 0x58, 0x90, 0x42, 0xa6, 0xaf, 0x93, 0xc9, + 0x50, 0xfe, 0x15, 0xe6, 0xb3, 0x53, 0x05, 0x63, 0x2a, 0x38, 0xee, 0x15, 0xa8, 0xa1, 0x10, 0xb4, + 0x00, 0xfb, 0xdf, 0x3a, 0x29, 0x94, 0x01, 0x20, 0x9d, 0xdc, 0x5a, 0xc7, 0x9b, 0xdc, 0x66, 0xcf, + 0x64, 0xe1, 0xb3, 0x3a, 0x93, 0xef, 0xc6, 0x67, 0x52, 0xe5, 0xd3, 0x17, 0xc9, 0x44, 0x7f, 0xc7, + 0x09, 0xf5, 0xa1, 0xbc, 0x47, 0xa7, 0x9d, 0x1b, 0x82, 0xc8, 0x4f, 0x25, 0x51, 0xb1, 0x52, 0x3c, + 0x81, 0xe2, 0x94, 0x49, 0xa6, 0xc3, 0xf7, 0xb2, 0xdb, 0x65, 0x2d, 0x4c, 0x1b, 0x93, 0x24, 0x53, + 0x0f, 0x40, 0xc2, 0x43, 0x9f, 0x24, 0xe5, 0x80, 0x39, 0x21, 0x77, 0x79, 0xea, 0x64, 0x9d, 0xd3, + 0x96, 0x09, 0x92, 0xfa, 0x89, 0xb0, 0x08, 0x55, 0x02, 0xca, 0x67, 0x40, 0x6e, 0xfa, 0x39, 0x32, + 0xd9, 0x3b, 0xbc, 0xed, 0xa3, 0xc7, 0x79, 0x71, 0x3a, 0xc3, 0xf3, 0xb6, 0x20, 0x8a, 0x93, 0xb9, + 0x3c, 0x09, 0x24, 0x15, 0x7d, 0x93, 0xcd, 0x14, 0x0c, 0x64, 0x60, 0xf9, 0xbe, 0xcd, 0xf3, 0xcd, + 0xe9, 0x77, 0x99, 0x48, 0xb8, 0x13, 0x69, 0xe5, 0xf1, 0xa5, 0x9d, 0xe1, 0xd2, 0xe6, 0x1b, 0xc3, + 0x58, 0x30, 0x4a, 0x00, 0x7d, 0x8e, 0x54, 0x5a, 0x83, 0xc0, 0x11, 0x44, 0xcc, 0x46, 0xef, 0xd7, + 0x09, 0xf8, 0x0a, 0xd2, 0xb9, 0x1e, 0x4f, 0x8a, 0x04, 0xb6, 0xa6, 0x09, 0x10, 0x4f, 0xe1, 0xa9, + 0xdc, 0xa2, 0x2f, 0x73, 0x43, 0xe5, 0xd0, 0x54, 0x4c, 0xd5, 0x87, 0x12, 0x5b, 0x47, 0x36, 0x02, + 0x2e, 0xde, 0x38, 0x90, 0x13, 0x0e, 0x41, 0xa1, 0x5f, 0x22, 0xe5, 0xa6, 0x2c, 0xd5, 0x64, 0x52, + 0x3b, 0x76, 0x48, 0x26, 0xaa, 0x11, 0x28, 0x00, 0x00, 0x81, 0xec, 0x7f, 0x95, 0xc8, 
0x49, 0xb4, + 0x56, 0xd1, 0xef, 0xec, 0xec, 0xf3, 0xfa, 0xcf, 0x8c, 0x21, 0xf7, 0x67, 0x62, 0xc8, 0xa9, 0x14, + 0xb3, 0x11, 0x45, 0xbe, 0x45, 0x66, 0x94, 0xfb, 0xd6, 0x63, 0x18, 0x4d, 0x96, 0xc7, 0x3b, 0x71, + 0xea, 0xbd, 0x53, 0x42, 0x94, 0xd5, 0xac, 0xa4, 0xc0, 0x21, 0x23, 0x4c, 0x88, 0x47, 0x2f, 0xa7, + 0xc5, 0x17, 0xf3, 0x88, 0x47, 0x8f, 0x36, 0x2c, 0x7e, 0x33, 0x05, 0x0e, 0x19, 0x61, 0x42, 0x7c, + 0x73, 0x10, 0x46, 0x7e, 0x2f, 0x16, 0x5f, 0xca, 0x23, 0xbe, 0x21, 0x31, 0x46, 0x88, 0x6f, 0xa4, + 0xc0, 0x21, 0x23, 0x8c, 0xbe, 0x63, 0x91, 0x33, 0xaf, 0x33, 0x6f, 0xd7, 0xf5, 0xc2, 0x0d, 0xb7, + 0xcf, 0xba, 0xbc, 0x64, 0x8a, 0x17, 0xa2, 0x8e, 0xe9, 0xda, 0x78, 0x0b, 0x59, 0x4b, 0x83, 0xa5, + 0x57, 0x74, 0x0f, 0x5f, 0xd1, 0x99, 0xb5, 0xd1, 0xe2, 0xe0, 0xa0, 0x75, 0xd8, 0x7f, 0x2a, 0x62, + 0xb7, 0xc1, 0xf4, 0xa7, 0xa6, 0x07, 0xb2, 0x3e, 0xc5, 0x03, 0x71, 0x1d, 0xcb, 0xb6, 0xbc, 0xdb, + 0x7c, 0x95, 0x6d, 0xbf, 0xe4, 0xfb, 0xbb, 0xf9, 0x2c, 0xec, 0xc5, 0x14, 0x86, 0xf2, 0xea, 0x52, + 0xc7, 0xe9, 0x01, 0xc8, 0x08, 0xa3, 0xfb, 0xe4, 0xa4, 0x92, 0xa3, 0xa5, 0x2b, 0x03, 0x7b, 0x61, + 0xec, 0xdc, 0xe4, 0xa5, 0x18, 0x42, 0x09, 0x3f, 0x25, 0x5a, 0xd3, 0x29, 0x3a, 0xa4, 0x25, 0xd1, + 0x37, 0x2d, 0x32, 0x27, 0x73, 0x8b, 0xc6, 0x8e, 0xe3, 0x75, 0xd4, 0x6e, 0xa0, 0x81, 0x3d, 0x9f, + 0x23, 0x7d, 0x51, 0x28, 0x4a, 0xb8, 0xac, 0x05, 0x56, 0x33, 0xd8, 0x30, 0x24, 0xcd, 0xfe, 0x71, + 0x91, 0xd0, 0xe1, 0x76, 0x18, 0x7d, 0x3c, 0xe5, 0x2c, 0x2e, 0x64, 0x9c, 0xc5, 0x9c, 0x39, 0xc3, + 0xf0, 0x15, 0x1d, 0x52, 0x56, 0xab, 0xce, 0x57, 0x2f, 0xa1, 0x5a, 0x10, 0x77, 0x94, 0xfe, 0x10, + 0x5e, 0xe4, 0x3a, 0xb8, 0x8b, 0xb8, 0x5b, 0x47, 0x93, 0x34, 0xca, 0x4c, 0xb4, 0x00, 0x1a, 0x62, + 0x9d, 0xad, 0xb4, 0x86, 0xdb, 0xf3, 0x42, 0xee, 0xed, 0xd1, 0x32, 0x67, 0xe3, 0x5a, 0x5b, 0xd1, + 0xc1, 0x94, 0x62, 0xbf, 0x3d, 0x49, 0x8c, 0xfc, 0x87, 0x3e, 0xcf, 0xbd, 0x20, 0x0b, 0xf6, 0xdc, + 0x26, 0x5b, 0x6e, 0x36, 0xfd, 0x81, 0x17, 0xe1, 0xc6, 0xc4, 0xdf, 0x2c, 0x36, 0x53, 0xa3, 0x90, + 0xe1, 0x96, 0xfd, 0x7a, 0xe9, 0xd8, 0x70, 0x63, 0x72, 0xf5, 0xeb, 0x33, 0xc9, 0x31, 0x56, 0xb7, + 0x08, 0x9c, 0xaa, 0x96, 0x8b, 0xc7, 0x58, 0x2d, 0xbb, 0xa4, 0x12, 0xa6, 0x7d, 0xf1, 0x33, 0xb9, + 0x3e, 0x3e, 0xa0, 0xcf, 0x8b, 0x9b, 0x71, 0xb1, 0xa3, 0x8b, 0xe1, 0x85, 0xd6, 0x54, 0xd0, 0x46, + 0x5f, 0x9b, 0x47, 0x6b, 0x2a, 0x23, 0x48, 0xb4, 0xa6, 0x9e, 0x01, 0x81, 0x79, 0x8d, 0x36, 0x15, + 0x30, 0xa5, 0xc1, 0x10, 0x53, 0xa1, 0x4f, 0xa9, 0x0d, 0x00, 0xd9, 0x45, 0xff, 0xc3, 0x0d, 0x58, + 0x8f, 0x79, 0x51, 0x98, 0x64, 0x91, 0x7a, 0x34, 0x84, 0x04, 0x97, 0x0e, 0x08, 0xe9, 0xc7, 0x2d, + 0x1b, 0xac, 0x40, 0x96, 0x73, 0xbc, 0x4b, 0xba, 0xef, 0x93, 0x24, 0xea, 0x09, 0x1d, 0x0c, 0x41, + 0xf4, 0x2b, 0xe4, 0x6c, 0x92, 0x8f, 0xad, 0x30, 0xa7, 0x25, 0xc3, 0x06, 0x36, 0x36, 0x55, 0xa7, + 0xef, 0x3e, 0x3e, 0xfd, 0x6c, 0xe3, 0x20, 0x26, 0x38, 0x78, 0x3e, 0xbd, 0x45, 0xa6, 0x3d, 0xbf, + 0xc5, 0x1f, 0xbb, 0x3c, 0x15, 0xf2, 0x03, 0x4c, 0x9c, 0xea, 0xe3, 0xbd, 0x95, 0x6a, 0xfe, 0x3a, + 0xdd, 0xeb, 0x06, 0x92, 0xaa, 0xb3, 0x4c, 0x0a, 0xa4, 0x24, 0xd9, 0x7f, 0x28, 0x91, 0xf9, 0x11, + 0xf1, 0x9c, 0xde, 0xc0, 0xae, 0x4a, 0xae, 0xe6, 0x61, 0xfc, 0xd9, 0xc8, 0xe8, 0xac, 0xc8, 0x26, + 0x62, 0xb7, 0x7b, 0xa7, 0x9a, 0x88, 0x1a, 0x09, 0x0c, 0x54, 0xdd, 0x25, 0x29, 0x1e, 0xa9, 0x4b, + 0xb2, 0x46, 0x28, 0xbb, 0xc5, 0x37, 0x9e, 0x61, 0x2e, 0x27, 0xfe, 0x55, 0x25, 0x7e, 0xa5, 0xbe, + 0x88, 0xdc, 0xf4, 0xca, 0x10, 0x07, 0x8c, 0x98, 0x25, 0x4a, 0xa4, 0xb6, 0xcf, 0xad, 0x56, 0xac, + 0x57, 0x1e, 0x3b, 0xa3, 0x44, 0xba, 0xaa, 0x07, 0x20, 0xe1, 0xe1, 0x27, 0x28, 0x2e, 0x7b, 0xcb, + 0x79, 0x5a, 
0xa0, 0x4a, 0x11, 0xd2, 0xa0, 0x0f, 0xac, 0x77, 0xe9, 0x32, 0x99, 0x95, 0x93, 0x96, + 0x37, 0x56, 0x75, 0x0f, 0x4a, 0x7d, 0x82, 0x3e, 0x83, 0x53, 0x54, 0x0b, 0x26, 0x19, 0x86, 0x2c, + 0xbf, 0xfd, 0xab, 0x22, 0x99, 0x1f, 0x91, 0x04, 0xc7, 0xcd, 0x38, 0xeb, 0x4e, 0x34, 0xe3, 0x3e, + 0x0b, 0x93, 0xe1, 0x99, 0x9d, 0xe7, 0x37, 0x9c, 0xe6, 0x0e, 0xc3, 0xef, 0x19, 0xb1, 0xda, 0xae, + 0x2b, 0x32, 0xe8, 0x71, 0x6d, 0x5d, 0xa5, 0x23, 0x59, 0xd7, 0xd8, 0x16, 0xf1, 0xbc, 0xae, 0x58, + 0x44, 0xc3, 0x69, 0xc3, 0x89, 0x76, 0xb0, 0x55, 0x13, 0x07, 0xcb, 0x95, 0xd4, 0x28, 0x64, 0xb8, + 0xed, 0x5f, 0x58, 0x64, 0x7e, 0x44, 0x32, 0x99, 0x8a, 0x70, 0xd6, 0x31, 0x46, 0x38, 0xd1, 0x0a, + 0x4f, 0x36, 0xd0, 0x6c, 0x85, 0xab, 0xcd, 0xc0, 0x51, 0xfb, 0xc3, 0xa1, 0x75, 0x5e, 0xd9, 0xe3, + 0xd1, 0x20, 0x5f, 0xb3, 0x70, 0x43, 0xf5, 0xe5, 0x94, 0xc9, 0x3c, 0x31, 0x76, 0xee, 0xbb, 0xea, + 0xb5, 0xfd, 0x4c, 0x43, 0xee, 0x4e, 0xb8, 0x16, 0xfb, 0xcf, 0x16, 0x99, 0x49, 0xb7, 0xfd, 0xe8, + 0x7d, 0xa4, 0x38, 0x08, 0x5c, 0x7c, 0xbb, 0x78, 0xc6, 0xcb, 0xb0, 0x0a, 0x82, 0x2e, 0x86, 0x03, + 0xd6, 0x46, 0xd5, 0xc5, 0xc3, 0xdc, 0xb4, 0x41, 0xd0, 0x69, 0x9f, 0x54, 0xfb, 0x81, 0x7f, 0x6b, + 0x5f, 0x95, 0xcb, 0xf9, 0xae, 0x2f, 0x6c, 0x24, 0x00, 0x49, 0xdf, 0xc8, 0x20, 0x82, 0x29, 0xc2, + 0xfe, 0xb9, 0x45, 0xe8, 0x70, 0x75, 0xf0, 0x5f, 0x67, 0x4d, 0x3f, 0x2c, 0x90, 0x49, 0xdc, 0x48, + 0xfa, 0x4d, 0x5e, 0x91, 0xa5, 0x94, 0x9e, 0x6f, 0x85, 0x99, 0x7e, 0x6d, 0x7c, 0xfe, 0xd2, 0x74, + 0xc8, 0xc8, 0xa2, 0x6f, 0x59, 0xe4, 0x14, 0x27, 0xa5, 0xdf, 0x2f, 0x5f, 0x0f, 0xfb, 0xc5, 0x2c, + 0x4c, 0xfd, 0x2c, 0x2e, 0xe2, 0xd4, 0xd0, 0x10, 0x0c, 0x0b, 0xb5, 0xff, 0x52, 0x20, 0xc3, 0x8c, + 0x42, 0xa5, 0x4d, 0x95, 0x4b, 0x59, 0x23, 0x2f, 0x63, 0xe1, 0xa8, 0x28, 0x87, 0x1c, 0x79, 0x9b, + 0x29, 0xdf, 0xe2, 0x95, 0x54, 0xd1, 0x57, 0x0e, 0xfc, 0xee, 0xcb, 0x3c, 0xa3, 0x37, 0x6e, 0x13, + 0x49, 0x58, 0x40, 0x78, 0x6e, 0xd4, 0x53, 0x4d, 0x7d, 0x39, 0x29, 0xdf, 0x1d, 0x93, 0x61, 0x59, + 0xc6, 0xd7, 0x6f, 0x44, 0x86, 0x44, 0xc8, 0x18, 0x0d, 0x46, 0xfb, 0x47, 0xbc, 0xc8, 0xcd, 0x96, + 0xa7, 0x62, 0xbe, 0x2c, 0x77, 0x56, 0x57, 0xb2, 0xed, 0x81, 0x55, 0x45, 0x06, 0x3d, 0x4e, 0xb7, + 0xc8, 0xa4, 0x88, 0x6d, 0x80, 0x87, 0x7a, 0xec, 0x18, 0x29, 0xbf, 0x46, 0x5e, 0x55, 0x08, 0xa0, + 0xa1, 0xec, 0xdf, 0xf3, 0x53, 0x39, 0x5c, 0x95, 0x71, 0x37, 0x78, 0x5a, 0x7c, 0x44, 0x8a, 0x9b, + 0xc0, 0xab, 0xa9, 0x45, 0xde, 0x8b, 0x8b, 0x3c, 0xbd, 0x3e, 0x82, 0x07, 0x46, 0xce, 0x8c, 0xe3, + 0x7b, 0xe1, 0x0e, 0xc4, 0x77, 0x7b, 0x93, 0x90, 0xe4, 0xf3, 0x2c, 0xbd, 0x40, 0x4a, 0x9e, 0xb8, + 0x0d, 0xa7, 0x16, 0x17, 0xa7, 0x90, 0xf2, 0x12, 0x9c, 0x1c, 0xa1, 0x0f, 0x90, 0x89, 0x3d, 0xa7, + 0x3b, 0xd0, 0xb7, 0x0c, 0xe3, 0xab, 0x11, 0xaf, 0x08, 0x22, 0xa8, 0x31, 0xfb, 0x97, 0x05, 0x52, + 0x35, 0x3e, 0x7f, 0x1c, 0x47, 0x22, 0x3b, 0xd1, 0xe7, 0x81, 0x55, 0xdf, 0xea, 0x78, 0x2e, 0xf7, + 0x97, 0x19, 0x11, 0x9e, 0x93, 0x97, 0x10, 0x4f, 0x21, 0x28, 0xe8, 0x4c, 0xe6, 0x53, 0x3c, 0x8e, + 0xcc, 0xc7, 0xfe, 0xae, 0x45, 0x66, 0x33, 0xab, 0x11, 0xdf, 0x84, 0xc2, 0xf8, 0x09, 0x77, 0x22, + 0x2e, 0x8c, 0x12, 0x3e, 0x30, 0xb8, 0x64, 0x82, 0xc2, 0xc2, 0xc8, 0xf5, 0x64, 0x87, 0x59, 0x7c, + 0x4b, 0x2a, 0x64, 0x12, 0x94, 0xd4, 0x28, 0x64, 0xb8, 0xed, 0xb7, 0x2d, 0x72, 0xef, 0x61, 0x8d, + 0x3c, 0x91, 0xae, 0x62, 0xb7, 0x2e, 0x4e, 0x81, 0xac, 0x74, 0xba, 0xba, 0x96, 0x1e, 0x86, 0x2c, + 0xbf, 0xb8, 0x2d, 0x64, 0x90, 0x70, 0x81, 0x71, 0xb0, 0x33, 0xa6, 0x83, 0xc9, 0x67, 0xff, 0xcd, + 0x22, 0xa7, 0x47, 0x55, 0x55, 0x34, 0xd0, 0x37, 0x7b, 0xd4, 0x85, 0xad, 0x6b, 0x47, 0x2f, 0xd4, + 0x6a, 0xf2, 0x7e, 0xcf, 0x15, 0xee, 
0xb1, 0xf6, 0x47, 0xdf, 0xf9, 0x59, 0xbc, 0xcc, 0x4f, 0x4b, + 0xcc, 0x43, 0xe7, 0x48, 0x71, 0x97, 0xed, 0x2b, 0x45, 0x80, 0xf8, 0x93, 0x9e, 0x4e, 0x9d, 0x0e, + 0x3c, 0x0e, 0x4f, 0x17, 0x2e, 0x5b, 0x4f, 0x57, 0xde, 0xfe, 0xd9, 0xf9, 0x13, 0x6f, 0xfe, 0xe3, + 0xc2, 0x09, 0xfb, 0x07, 0x16, 0x31, 0x43, 0xbb, 0xb8, 0xdc, 0xb2, 0x13, 0x45, 0x7d, 0x49, 0xc2, + 0x8f, 0x32, 0xf2, 0x72, 0xcb, 0x4b, 0x5b, 0x5b, 0x1b, 0x92, 0x08, 0xc9, 0xb8, 0xf8, 0x38, 0x2a, + 0x1e, 0x42, 0xc5, 0x5d, 0x4a, 0x3e, 0x8e, 0x0a, 0xee, 0x4d, 0xc5, 0x6e, 0x70, 0x88, 0x9b, 0x14, + 0x9e, 0xaf, 0x98, 0xd5, 0x7d, 0xdb, 0xaa, 0x4a, 0xab, 0x15, 0xa7, 0x1e, 0xb3, 0x7f, 0xc3, 0x03, + 0xe4, 0xd0, 0xd7, 0x3a, 0x7a, 0x33, 0x0e, 0xf4, 0x56, 0x7e, 0xeb, 0x1f, 0x9d, 0x1c, 0x1c, 0xd9, + 0x62, 0xdf, 0xb5, 0x08, 0x49, 0x0a, 0x2d, 0xda, 0x25, 0xd3, 0x0a, 0x38, 0x95, 0x5d, 0xe4, 0x59, + 0xf0, 0x69, 0x5c, 0xc0, 0xf4, 0xa6, 0x81, 0x07, 0x29, 0x74, 0x51, 0x40, 0xf4, 0x44, 0x17, 0x4c, + 0x9e, 0x83, 0x42, 0xfa, 0x6a, 0xd7, 0x35, 0x3d, 0x00, 0x09, 0x8f, 0xfd, 0xbd, 0x09, 0x32, 0x3f, + 0xe2, 0x83, 0xc1, 0xff, 0x70, 0x85, 0xcf, 0xc3, 0xb5, 0xba, 0x7f, 0x13, 0x66, 0xc3, 0xbd, 0xba, + 0x9e, 0x23, 0x4a, 0x65, 0xf5, 0x87, 0xb8, 0xaa, 0xe1, 0x7a, 0x4d, 0xd5, 0x98, 0x72, 0x74, 0xc1, + 0xa6, 0x9a, 0x9d, 0x09, 0x19, 0x4c, 0x9e, 0x74, 0x85, 0x57, 0xbe, 0xad, 0x9a, 0x7f, 0x1a, 0xff, + 0x0f, 0x80, 0xba, 0x2d, 0x33, 0x99, 0x67, 0x43, 0x64, 0x9f, 0x07, 0x0c, 0x18, 0x48, 0x81, 0xd2, + 0xef, 0xf0, 0xbc, 0x05, 0x09, 0xcb, 0x41, 0xe4, 0xb6, 0x9d, 0x66, 0xfc, 0x65, 0xfd, 0x88, 0x11, + 0x6c, 0x01, 0x5f, 0x6e, 0x0e, 0x32, 0xf0, 0x30, 0x24, 0xd0, 0xbe, 0xc9, 0x8f, 0x7a, 0x36, 0x37, + 0xbb, 0xbd, 0xc0, 0xcf, 0xe4, 0xdd, 0xf6, 0x4c, 0xe0, 0x57, 0x57, 0xda, 0xd5, 0x98, 0xfd, 0x0e, + 0x2f, 0xae, 0x32, 0xa9, 0x6d, 0xae, 0xda, 0xf1, 0xa6, 0x59, 0x3b, 0x1e, 0x39, 0x43, 0x4f, 0x55, + 0x91, 0x76, 0x9b, 0xcc, 0xa4, 0x9b, 0xf5, 0x46, 0x41, 0x63, 0x1d, 0x56, 0xd0, 0x88, 0x8b, 0x97, + 0x8e, 0xb8, 0x81, 0xc9, 0x8d, 0x18, 0xbf, 0xb5, 0xc7, 0xbd, 0xde, 0x65, 0xa4, 0x43, 0xcc, 0x51, + 0x7f, 0xf0, 0xbd, 0x0f, 0xcf, 0x9d, 0x78, 0x9f, 0xff, 0xfe, 0xce, 0x7f, 0x6f, 0x7e, 0x74, 0xce, + 0x7a, 0x8f, 0xff, 0xde, 0xe7, 0xbf, 0x7f, 0xf2, 0xdf, 0xf7, 0x3f, 0x3e, 0x77, 0xe2, 0x66, 0x61, + 0xef, 0xe2, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x17, 0x05, 0xb7, 0x91, 0x33, 0x00, 0x00, } diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto b/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto deleted file mode 100644 index 0c06f124..00000000 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/generated.proto +++ /dev/null @@ -1,776 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package github.com.openshift.origin.pkg.build.api.v1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// BinaryBuildRequestOptions are the options required to fully speficy a binary build request -message BinaryBuildRequestOptions { - // metadata for BinaryBuildRequestOptions. 
- optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // asFile determines if the binary should be created as a file within the source rather than extracted as an archive - optional string asFile = 2; - - // revision.commit is the value identifying a specific commit - optional string revisionCommit = 3; - - // revision.message is the description of a specific commit - optional string revisionMessage = 4; - - // revision.authorName of the source control user - optional string revisionAuthorName = 5; - - // revision.authorEmail of the source control user - optional string revisionAuthorEmail = 6; - - // revision.committerName of the source control user - optional string revisionCommitterName = 7; - - // revision.committerEmail of the source control user - optional string revisionCommitterEmail = 8; -} - -// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, -// where the file will be extracted and used as the build source. -message BinaryBuildSource { - // asFile indicates that the provided binary input should be considered a single file - // within the build input. For example, specifying "webapp.war" would place the provided - // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build - // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. - // The custom strategy receives this binary as standard input. This filename may not - // contain slashes or be '..' or '.'. - optional string asFile = 1; -} - -// Build encapsulates the inputs needed to produce a new deployable image, as well as -// the status of the execution and a reference to the Pod which executed the build. -message Build { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // spec is all the inputs used to execute the build. - optional BuildSpec spec = 2; - - // status is the current status of the build. - optional BuildStatus status = 3; -} - -// BuildConfig is a template which can be used to create new builds. -message BuildConfig { - // metadata for BuildConfig. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // spec holds all the input necessary to produce a new build, and the conditions when - // to trigger them. - optional BuildConfigSpec spec = 2; - - // status holds any relevant information about a build config - optional BuildConfigStatus status = 3; -} - -// BuildConfigList is a collection of BuildConfigs. -message BuildConfigList { - // metadata for BuildConfigList. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // items is a list of build configs - repeated BuildConfig items = 2; -} - -// BuildConfigSpec describes when and how builds are created -message BuildConfigSpec { - // triggers determine how new Builds can be launched from a BuildConfig. If - // no triggers are defined, a new build can only occur as a result of an - // explicit client build creation. - repeated BuildTriggerPolicy triggers = 1; - - // RunPolicy describes how the new build created from this build - // configuration will be scheduled for execution. - // This is optional, if not specified we default to "Serial". - optional string runPolicy = 2; - - // CommonSpec is the desired build specification - optional CommonSpec commonSpec = 3; -} - -// BuildConfigStatus contains current state of the build config object. -message BuildConfigStatus { - // lastVersion is used to inform about number of last triggered build. 
- optional int64 lastVersion = 1; -} - -// BuildList is a collection of Builds. -message BuildList { - // metadata for BuildList. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // items is a list of builds - repeated Build items = 2; -} - -// BuildLog is the (unused) resource associated with the build log redirector -message BuildLog { -} - -// BuildLogOptions is the REST options for a build log -message BuildLogOptions { - // cointainer for which to stream logs. Defaults to only container if there is one container in the pod. - optional string container = 1; - - // follow if true indicates that the build log should be streamed until - // the build terminates. - optional bool follow = 2; - - // previous returns previous build logs. Defaults to false. - optional bool previous = 3; - - // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional int64 sinceSeconds = 4; - - // sinceTime is an RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; - - // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - optional bool timestamps = 6; - - // tailLines, If set, is the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - optional int64 tailLines = 7; - - // limitBytes, If set, is the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - optional int64 limitBytes = 8; - - // noWait if true causes the call to return immediately even if the build - // is not available yet. Otherwise the server will wait until the build has started. - // TODO: Fix the tag to 'noWait' in v2 - optional bool nowait = 9; - - // version of the build for which to view logs. - optional int64 version = 10; -} - -// BuildOutput is input to a build strategy and describes the Docker image that the strategy -// should produce. -message BuildOutput { - // to defines an optional location to push the output of this build to. - // Kind must be one of 'ImageStreamTag' or 'DockerImage'. - // This value will be used to look up a Docker image repository to push to. - // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of - // the build unless Namespace is specified. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference to = 1; - - // PushSecret is the name of a Secret that would be used for setting - // up the authentication for executing the Docker push to authentication - // enabled Docker Registry (or Docker Hub). - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pushSecret = 2; -} - -// A BuildPostCommitSpec holds a build post commit hook specification. 
The hook -// executes a command in a temporary container running the build output image, -// immediately after the last layer of the image is committed and before the -// image is pushed to a registry. The command is executed with the current -// working directory ($PWD) set to the image's WORKDIR. -// -// The build will be marked as failed if the hook execution fails. It will fail -// if the script or command return a non-zero exit code, or if there is any -// other error related to starting the temporary container. -// -// There are five different ways to configure the hook. As an example, all forms -// below are equivalent and will execute `rake test --verbose`. -// -// 1. Shell script: -// -// "postCommit": { -// "script": "rake test --verbose", -// } -// -// The above is a convenient form which is equivalent to: -// -// "postCommit": { -// "command": ["/bin/sh", "-ic"], -// "args": ["rake test --verbose"] -// } -// -// 2. A command as the image entrypoint: -// -// "postCommit": { -// "commit": ["rake", "test", "--verbose"] -// } -// -// Command overrides the image entrypoint in the exec form, as documented in -// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. -// -// 3. Pass arguments to the default entrypoint: -// -// "postCommit": { -// "args": ["rake", "test", "--verbose"] -// } -// -// This form is only useful if the image entrypoint can handle arguments. -// -// 4. Shell script with arguments: -// -// "postCommit": { -// "script": "rake test $1", -// "args": ["--verbose"] -// } -// -// This form is useful if you need to pass arguments that would otherwise be -// hard to quote properly in the shell script. In the script, $0 will be -// "/bin/sh" and $1, $2, etc, are the positional arguments from Args. -// -// 5. Command with arguments: -// -// "postCommit": { -// "command": ["rake", "test"], -// "args": ["--verbose"] -// } -// -// This form is equivalent to appending the arguments to the Command slice. -// -// It is invalid to provide both Script and Command simultaneously. If none of -// the fields are specified, the hook is not executed. -message BuildPostCommitSpec { - // command is the command to run. It may not be specified with Script. - // This might be needed if the image doesn't have `/bin/sh`, or if you - // do not want to use a shell. In all other cases, using Script might be - // more convenient. - repeated string command = 1; - - // args is a list of arguments that are provided to either Command, - // Script or the Docker image's default entrypoint. The arguments are - // placed immediately after the command to be run. - repeated string args = 2; - - // script is a shell script to be run with `/bin/sh -ic`. It may not be - // specified with Command. Use Script when a shell script is appropriate - // to execute the post build hook, for example for running unit tests - // with `rake test`. If you need control over the image entrypoint, or - // if the image does not have `/bin/sh`, use Command and/or Args. - // The `-i` flag is needed to support CentOS and RHEL images that use - // Software Collections (SCL), in order to have the appropriate - // collections enabled in the shell. E.g., in the Ruby image, this is - // necessary to make `ruby`, `bundle` and other binaries available in - // the PATH. - optional string script = 3; -} - -// BuildRequest is the resource used to pass parameters to build generator -message BuildRequest { - // metadata for BuildRequest. 
- optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // revision is the information from the source for a specific repo snapshot. - optional SourceRevision revision = 2; - - // triggeredByImage is the Image that triggered this build. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference triggeredByImage = 3; - - // from is the reference to the ImageStreamTag that triggered the build. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 4; - - // binary indicates a request to build from a binary provided to the builder - optional BinaryBuildSource binary = 5; - - // lastVersion (optional) is the LastVersion of the BuildConfig that was used - // to generate the build. If the BuildConfig in the generator doesn't match, a build will - // not be generated. - optional int64 lastVersion = 6; - - // env contains additional environment variables you want to pass into a builder container - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 7; - - // triggeredBy describes which triggers started the most recent update to the - // build configuration and contains information about those triggers. - repeated BuildTriggerCause triggeredBy = 8; -} - -// BuildSource is the SCM used for the build. -message BuildSource { - // type of build input to accept - // +k8s:conversion-gen=false - optional string type = 1; - - // binary builds accept a binary as their input. The binary is generally assumed to be a tar, - // gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build - // context and an optional Dockerfile may be specified to override any Dockerfile in the - // build context. For Source builds, this is assumed to be an archive as described above. For - // Source and Docker builds, if binary.asFile is set the build will receive a directory with - // a single file. contextDir may be used when an archive is provided. Custom builds will - // receive this binary as input on STDIN. - optional BinaryBuildSource binary = 2; - - // dockerfile is the raw contents of a Dockerfile which should be built. When this option is - // specified, the FROM may be modified based on your strategy base image and additional ENV - // stanzas from your strategy environment will be added after the FROM, but before the rest - // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like - // git - in those cases the Git repo will have any innate Dockerfile replaced in the context - // dir. - optional string dockerfile = 3; - - // git contains optional information about git build source - optional GitBuildSource git = 4; - - // images describes a set of images to be used to provide source for the build - repeated ImageSource images = 5; - - // contextDir specifies the sub-directory where the source code for the application exists. - // This allows to have buildable sources in directory other than root of - // repository. - optional string contextDir = 6; - - // sourceSecret is the name of a Secret that would be used for setting - // up the authentication for cloning private repository. - // The secret contains valid credentials for remote repository, where the - // data's key represent the authentication method to be used and value is - // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference sourceSecret = 7; - - // secrets represents a list of secrets and their destinations that will - // be used only for the build. 
- repeated SecretBuildSource secrets = 8; -} - -// BuildSpec has the information to represent a build and also additional -// information about a build -message BuildSpec { - // CommonSpec is the information that represents a build - optional CommonSpec commonSpec = 1; - - // triggeredBy describes which triggers started the most recent update to the - // build configuration and contains information about those triggers. - repeated BuildTriggerCause triggeredBy = 2; -} - -// BuildStatus contains the status of a build -message BuildStatus { - // phase is the point in the build lifecycle. - optional string phase = 1; - - // cancelled describes if a cancel event was triggered for the build. - optional bool cancelled = 2; - - // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. - optional string reason = 3; - - // message is a human-readable message indicating details about why the build has this status. - optional string message = 4; - - // startTimestamp is a timestamp representing the server time when this Build started - // running in a Pod. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTimestamp = 5; - - // completionTimestamp is a timestamp representing the server time when this Build was - // finished, whether that build failed or succeeded. It reflects the time at which - // the Pod running the Build terminated. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTimestamp = 6; - - // duration contains time.Duration object describing build time. - optional int64 duration = 7; - - // outputDockerImageReference contains a reference to the Docker image that - // will be built by this build. Its value is computed from - // Build.Spec.Output.To, and should include the registry address, so that - // it can be used to push and pull the image. - optional string outputDockerImageReference = 8; - - // config is an ObjectReference to the BuildConfig this Build is based on. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference config = 9; -} - -// BuildStrategy contains the details of how to perform a build. -message BuildStrategy { - // type is the kind of build strategy. - // +k8s:conversion-gen=false - optional string type = 1; - - // dockerStrategy holds the parameters to the Docker build strategy. - optional DockerBuildStrategy dockerStrategy = 2; - - // sourceStrategy holds the parameters to the Source build strategy. - optional SourceBuildStrategy sourceStrategy = 3; - - // customStrategy holds the parameters to the Custom build strategy - optional CustomBuildStrategy customStrategy = 4; - - // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. - // This strategy is in tech preview. - optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; -} - -// BuildTriggerCause holds information about a triggered build. It is used for -// displaying build trigger data for each build and build configuration in oc -// describe. It is also used to describe which triggers led to the most recent -// update in the build configuration. -message BuildTriggerCause { - // message is used to store a human readable message for why the build was - // triggered. E.g.: "Manually triggered by user", "Configuration change",etc. - optional string message = 1; - - // genericWebHook holds data about a builds generic webhook trigger. 
- optional GenericWebHookCause genericWebHook = 2; - - // gitHubWebHook represents data for a GitHub webhook that fired a - // specific build. - optional GitHubWebHookCause githubWebHook = 3; - - // imageChangeBuild stores information about an imagechange event - // that triggered a new build. - optional ImageChangeCause imageChangeBuild = 4; -} - -// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. -message BuildTriggerPolicy { - // type is the type of build trigger - optional string type = 1; - - // github contains the parameters for a GitHub webhook type of trigger - optional WebHookTrigger github = 2; - - // generic contains the parameters for a Generic webhook type of trigger - optional WebHookTrigger generic = 3; - - // imageChange contains parameters for an ImageChange type of trigger - optional ImageChangeTrigger imageChange = 4; -} - -// CommonSpec encapsulates all the inputs necessary to represent a build. -message CommonSpec { - // serviceAccount is the name of the ServiceAccount to use to run the pod - // created by this build. - // The pod will be allowed to use secrets referenced by the ServiceAccount - optional string serviceAccount = 1; - - // source describes the SCM in use. - optional BuildSource source = 2; - - // revision is the information from the source for a specific repo snapshot. - // This is optional. - optional SourceRevision revision = 3; - - // strategy defines how to perform a build. - optional BuildStrategy strategy = 4; - - // output describes the Docker image the Strategy should produce. - optional BuildOutput output = 5; - - // resources computes resource requirements to execute the build. - optional k8s.io.kubernetes.pkg.api.v1.ResourceRequirements resources = 6; - - // postCommit is a build hook executed after the build output image is - // committed, before it is pushed to a registry. - optional BuildPostCommitSpec postCommit = 7; - - // completionDeadlineSeconds is an optional duration in seconds, counted from - // the time when a build pod gets scheduled in the system, that the build may - // be active on a node before the system actively tries to terminate the - // build; value must be positive integer - optional int64 completionDeadlineSeconds = 8; -} - -// CustomBuildStrategy defines input parameters specific to Custom build. -message CustomBuildStrategy { - // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which - // the docker image should be pulled - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // pullSecret is the name of a Secret that would be used for setting up - // the authentication for pulling the Docker images from the private Docker - // registries - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; - - // env contains additional environment variables you want to pass into a builder container - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; - - // exposeDockerSocket will allow running Docker commands (and build Docker images) from - // inside the Docker container. 
- // TODO: Allow admins to enforce 'false' for this option - optional bool exposeDockerSocket = 4; - - // forcePull describes if the controller should configure the build pod to always pull the images - // for the builder or only pull if it is not present locally - optional bool forcePull = 5; - - // secrets is a list of additional secrets that will be included in the build pod - repeated SecretSpec secrets = 6; - - // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder - optional string buildAPIVersion = 7; -} - -// DockerBuildStrategy defines input parameters specific to Docker build. -message DockerBuildStrategy { - // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which - // the docker image should be pulled - // the resulting image will be used in the FROM line of the Dockerfile for this build. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // pullSecret is the name of a Secret that would be used for setting up - // the authentication for pulling the Docker images from the private Docker - // registries - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; - - // noCache if set to true indicates that the docker build must be executed with the - // --no-cache=true flag - optional bool noCache = 3; - - // env contains additional environment variables you want to pass into a builder container - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 4; - - // forcePull describes if the builder should pull the images from registry prior to building. - optional bool forcePull = 5; - - // dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, - // relative to the root of the context (contextDir). - optional string dockerfilePath = 6; -} - -// GenericWebHookCause holds information about a generic WebHook that -// triggered a build. -message GenericWebHookCause { - // revision is an optional field that stores the git source revision - // information of the generic webhook trigger when it is available. - optional SourceRevision revision = 1; - - // secret is the obfuscated webhook secret that triggered a build. - optional string secret = 2; -} - -// GenericWebHookEvent is the payload expected for a generic webhook post -message GenericWebHookEvent { - // type is the type of source repository - // +k8s:conversion-gen=false - optional string type = 1; - - // git is the git information if the Type is BuildSourceGit - optional GitInfo git = 2; - - // env contains additional environment variables you want to pass into a builder container - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; -} - -// GitBuildSource defines the parameters of a Git SCM -message GitBuildSource { - // uri points to the source that will be built. The structure of the source - // will depend on the type of build to run - optional string uri = 1; - - // ref is the branch/tag/ref to build. - optional string ref = 2; - - // httpProxy is a proxy used to reach the git repository over http - optional string httpProxy = 3; - - // httpsProxy is a proxy used to reach the git repository over https - optional string httpsProxy = 4; -} - -// GitHubWebHookCause has information about a GitHub webhook that triggered a -// build. -message GitHubWebHookCause { - // revision is the git revision information of the trigger. - optional SourceRevision revision = 1; - - // secret is the obfuscated webhook secret that triggered a build. 
- optional string secret = 2; -} - -// GitInfo is the aggregated git information for a generic webhook post -message GitInfo { - optional GitBuildSource gitBuildSource = 1; - - optional GitSourceRevision gitSourceRevision = 2; -} - -// GitSourceRevision is the commit information from a git source for a build -message GitSourceRevision { - // commit is the commit hash identifying a specific commit - optional string commit = 1; - - // author is the author of a specific commit - optional SourceControlUser author = 2; - - // committer is the committer of a specific commit - optional SourceControlUser committer = 3; - - // message is the description of a specific commit - optional string message = 4; -} - -// ImageChangeCause contains information about the image that triggered a -// build -message ImageChangeCause { - // imageID is the ID of the image that triggered a a new build. - optional string imageID = 1; - - // fromRef contains detailed information about an image that triggered a - // build. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference fromRef = 2; -} - -// ImageChangeTrigger allows builds to be triggered when an ImageStream changes -message ImageChangeTrigger { - // lastTriggeredImageID is used internally by the ImageChangeController to save last - // used image ID for build - optional string lastTriggeredImageID = 1; - - // from is a reference to an ImageStreamTag that will trigger a build when updated - // It is optional. If no From is specified, the From image from the build strategy - // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in - // a build configuration. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 2; -} - -// ImageSource describes an image that is used as source for the build -message ImageSource { - // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to - // copy source from. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // paths is a list of source and destination paths to copy from the image. - repeated ImageSourcePath paths = 2; - - // pullSecret is a reference to a secret to be used to pull the image from a registry - // If the image is pulled from the OpenShift registry, this field does not need to be set. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 3; -} - -// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. -message ImageSourcePath { - // sourcePath is the absolute path of the file or directory inside the image to - // copy to the build directory. - optional string sourcePath = 1; - - // destinationDir is the relative directory within the build directory - // where files copied from the image are placed. - optional string destinationDir = 2; -} - -// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. -// This strategy is in tech preview. -message JenkinsPipelineBuildStrategy { - // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline - // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are - // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. - optional string jenkinsfilePath = 1; - - // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. 
- optional string jenkinsfile = 2; -} - -// SecretBuildSource describes a secret and its destination directory that will be -// used only at the build time. The content of the secret referenced here will -// be copied into the destination directory instead of mounting. -message SecretBuildSource { - // secret is a reference to an existing secret that you want to use in your - // build. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secret = 1; - - // destinationDir is the directory where the files from the secret should be - // available for the build time. - // For the Source build strategy, these will be injected into a container - // where the assemble script runs. Later, when the script finishes, all files - // injected will be truncated to zero length. - // For the Docker build strategy, these will be copied into the build - // directory, where the Dockerfile is located, so users can ADD or COPY them - // during docker build. - optional string destinationDir = 2; -} - -// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point -message SecretSpec { - // secretSource is a reference to the secret - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretSource = 1; - - // mountPath is the path at which to mount the secret - optional string mountPath = 2; -} - -// SourceBuildStrategy defines input parameters specific to an Source build. -message SourceBuildStrategy { - // from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which - // the docker image should be pulled - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // pullSecret is the name of a Secret that would be used for setting up - // the authentication for pulling the Docker images from the private Docker - // registries - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference pullSecret = 2; - - // env contains additional environment variables you want to pass into a builder container - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 3; - - // scripts is the location of Source scripts - optional string scripts = 4; - - // incremental flag forces the Source build to do incremental builds if true. - optional bool incremental = 5; - - // forcePull describes if the builder should pull the images from registry prior to building. - optional bool forcePull = 6; - - // runtimeImage is an optional image that is used to run an application - // without unneeded dependencies installed. The building of the application - // is still done in the builder image but, post build, you can copy the - // needed artifacts in the runtime image for use. - // This field and the feature it enables are in tech preview. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference runtimeImage = 7; - - // runtimeArtifacts specifies a list of source/destination pairs that will be - // copied from the builder to the runtime image. sourcePath can be a file or - // directory. destinationDir must be a directory. destinationDir can also be - // empty or equal to ".", in this case it just refers to the root of WORKDIR. - // This field and the feature it enables are in tech preview. 
- repeated ImageSourcePath runtimeArtifacts = 8; -} - -// SourceControlUser defines the identity of a user of source control -message SourceControlUser { - // name of the source control user - optional string name = 1; - - // email of the source control user - optional string email = 2; -} - -// SourceRevision is the revision or commit information from the source for the build -message SourceRevision { - // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' - // +k8s:conversion-gen=false - optional string type = 1; - - // Git contains information about git-based build source - optional GitSourceRevision git = 2; -} - -// WebHookTrigger is a trigger that gets invoked using a webhook type of post -message WebHookTrigger { - // secret used to validate requests. - optional string secret = 1; - - // allowEnv determines whether the webhook can set environment variables; can only - // be set to true for GenericWebHook. - optional bool allowEnv = 2; -} - diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go index bab94f8a..b1fef1ac 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/swagger_doc.go @@ -42,7 +42,7 @@ func (Build) SwaggerDoc() map[string]string { } var map_BuildConfig = map[string]string{ - "": "BuildConfig is a template which can be used to create new builds.", + "": "Build configurations define a build process for new Docker images. There are three types of builds possible - a Docker build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary Docker images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the Docker registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.", "metadata": "metadata for BuildConfig.", "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", "status": "status holds any relevant information about a build config", @@ -118,9 +118,10 @@ func (BuildLogOptions) SwaggerDoc() map[string]string { } var map_BuildOutput = map[string]string{ - "": "BuildOutput is input to a build strategy and describes the Docker image that the strategy should produce.", - "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a Docker image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", - "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "": "BuildOutput is input to a build strategy and describes the Docker image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. 
Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a Docker image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", } func (BuildOutput) SwaggerDoc() map[string]string { @@ -243,6 +244,7 @@ var map_CommonSpec = map[string]string{ "resources": "resources computes resource requirements to execute the build.", "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", } func (CommonSpec) SwaggerDoc() map[string]string { @@ -300,11 +302,9 @@ func (GenericWebHookEvent) SwaggerDoc() map[string]string { } var map_GitBuildSource = map[string]string{ - "": "GitBuildSource defines the parameters of a Git SCM", - "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", - "ref": "ref is the branch/tag/ref to build.", - "httpProxy": "httpProxy is a proxy used to reach the git repository over http", - "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", } func (GitBuildSource) SwaggerDoc() map[string]string { @@ -361,8 +361,18 @@ func (ImageChangeTrigger) SwaggerDoc() map[string]string { return map_ImageChangeTrigger } +var map_ImageLabel = map[string]string{ + "": "ImageLabel represents a label applied to the resulting image.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + var map_ImageSource = map[string]string{ - "": "ImageSource describes an image that is used as source for the build", + "": "ImageSource is used to describe build source that will be extracted from an image. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. 
A list of paths to copy from the image and their respective destination within the build directory must be specified in the paths array.", "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", "paths": "paths is a list of source and destination paths to copy from the image.", "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.", @@ -392,6 +402,17 @@ func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { return map_JenkinsPipelineBuildStrategy } +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines what proxies to use for an operation", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "noProxy": "noProxy is the list of domains for which the proxy should not be used", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + var map_SecretBuildSource = map[string]string{ "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", "secret": "secret is a reference to an existing secret that you want to use in your build.", diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go index a3e8f56c..798218e9 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/types.go @@ -1,6 +1,7 @@ package v1 import ( + "fmt" "time" "k8s.io/kubernetes/pkg/api/unversioned" @@ -34,6 +35,15 @@ type BuildSpec struct { TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,2,rep,name=triggeredBy"` } +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNodeSelector map[string]string + +func (t OptionalNodeSelector) String() string { + return fmt.Sprintf("%v", map[string]string(t)) +} + // CommonSpec encapsulates all the inputs necessary to represent a build. type CommonSpec struct { // serviceAccount is the name of the ServiceAccount to use to run the pod @@ -66,6 +76,12 @@ type CommonSpec struct { // be active on a node before the system actively tries to terminate the // build; value must be positive integer CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"` } // BuildTriggerCause holds information about a triggered build. It is used for @@ -255,7 +271,11 @@ type BuildSource struct { Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"` } -// ImageSource describes an image that is used as source for the build +// ImageSource is used to describe build source that will be extracted from an image. 
A reference of +// type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified +// to pull the image from an external registry or override the default service account secret if pulling +// from the internal registry. A list of paths to copy from the image and their respective destination +// within the build directory must be specified in the paths array. type ImageSource struct { // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to // copy source from. @@ -336,6 +356,18 @@ type GitSourceRevision struct { Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` } +// ProxyConfig defines what proxies to use for an operation +type ProxyConfig struct { + // httpProxy is a proxy used to reach the git repository over http + HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` + + // httpsProxy is a proxy used to reach the git repository over https + HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` + + // noProxy is the list of domains for which the proxy should not be used + NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"` +} + // GitBuildSource defines the parameters of a Git SCM type GitBuildSource struct { // uri points to the source that will be built. The structure of the source @@ -345,11 +377,8 @@ type GitBuildSource struct { // ref is the branch/tag/ref to build. Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` - // httpProxy is a proxy used to reach the git repository over http - HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` - - // httpsProxy is a proxy used to reach the git repository over https - HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` + // proxyConfig defines the proxies to use for the git clone operation + ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"` } // SourceControlUser defines the identity of a user of source control @@ -609,9 +638,24 @@ type BuildOutput struct { // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). PushSecret *kapi.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"` + + // imageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"` } -// BuildConfig is a template which can be used to create new builds. +// ImageLabel represents a label applied to the resulting image. +type ImageLabel struct { + // name defines the name of the label. It must have non-zero length. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // value defines the literal value of the label. + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +// Build configurations define a build process for new Docker images. There are three types of builds possible - a Docker build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary Docker images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the Docker registry specified in the "output" section. 
A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created. +// +// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build. type BuildConfig struct { unversioned.TypeMeta `json:",inline"` // metadata for BuildConfig. diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go index 3888ae35..63850bf7 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.conversion.go @@ -81,12 +81,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_ImageChangeCause_To_v1_ImageChangeCause, Convert_v1_ImageChangeTrigger_To_api_ImageChangeTrigger, Convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger, + Convert_v1_ImageLabel_To_api_ImageLabel, + Convert_api_ImageLabel_To_v1_ImageLabel, Convert_v1_ImageSource_To_api_ImageSource, Convert_api_ImageSource_To_v1_ImageSource, Convert_v1_ImageSourcePath_To_api_ImageSourcePath, Convert_api_ImageSourcePath_To_v1_ImageSourcePath, Convert_v1_JenkinsPipelineBuildStrategy_To_api_JenkinsPipelineBuildStrategy, Convert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy, + Convert_v1_ProxyConfig_To_api_ProxyConfig, + Convert_api_ProxyConfig_To_v1_ProxyConfig, Convert_v1_SecretBuildSource_To_api_SecretBuildSource, Convert_api_SecretBuildSource_To_v1_SecretBuildSource, Convert_v1_SecretSpec_To_api_SecretSpec, @@ -486,6 +490,17 @@ func autoConvert_v1_BuildOutput_To_api_BuildOutput(in *BuildOutput, out *api.Bui } else { out.PushSecret = nil } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]api.ImageLabel, len(*in)) + for i := range *in { + if err := Convert_v1_ImageLabel_To_api_ImageLabel(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ImageLabels = nil + } return nil } @@ -508,6 +523,17 @@ func autoConvert_api_BuildOutput_To_v1_BuildOutput(in *api.BuildOutput, out *Bui } else { out.PushSecret = nil } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + for i := range *in { + if err := Convert_api_ImageLabel_To_v1_ImageLabel(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.ImageLabels = nil + } return nil } @@ -1145,6 +1171,15 @@ func autoConvert_v1_CommonSpec_To_api_CommonSpec(in *CommonSpec, out *api.Common return err } out.CompletionDeadlineSeconds = in.CompletionDeadlineSeconds + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.NodeSelector = nil + } return nil } @@ -1179,6 +1214,15 @@ func autoConvert_api_CommonSpec_To_v1_CommonSpec(in *api.CommonSpec, out *Common return err } out.CompletionDeadlineSeconds = in.CompletionDeadlineSeconds + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.NodeSelector = nil + } return nil } @@ -1445,8 +1489,9 @@ func Convert_api_GenericWebHookEvent_To_v1_GenericWebHookEvent(in *api.GenericWe func 
autoConvert_v1_GitBuildSource_To_api_GitBuildSource(in *GitBuildSource, out *api.GitBuildSource, s conversion.Scope) error { out.URI = in.URI out.Ref = in.Ref - out.HTTPProxy = in.HTTPProxy - out.HTTPSProxy = in.HTTPSProxy + if err := Convert_v1_ProxyConfig_To_api_ProxyConfig(&in.ProxyConfig, &out.ProxyConfig, s); err != nil { + return err + } return nil } @@ -1457,8 +1502,9 @@ func Convert_v1_GitBuildSource_To_api_GitBuildSource(in *GitBuildSource, out *ap func autoConvert_api_GitBuildSource_To_v1_GitBuildSource(in *api.GitBuildSource, out *GitBuildSource, s conversion.Scope) error { out.URI = in.URI out.Ref = in.Ref - out.HTTPProxy = in.HTTPProxy - out.HTTPSProxy = in.HTTPSProxy + if err := Convert_api_ProxyConfig_To_v1_ProxyConfig(&in.ProxyConfig, &out.ProxyConfig, s); err != nil { + return err + } return nil } @@ -1634,6 +1680,26 @@ func Convert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(in *api.ImageChange return autoConvert_api_ImageChangeTrigger_To_v1_ImageChangeTrigger(in, out, s) } +func autoConvert_v1_ImageLabel_To_api_ImageLabel(in *ImageLabel, out *api.ImageLabel, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_ImageLabel_To_api_ImageLabel(in *ImageLabel, out *api.ImageLabel, s conversion.Scope) error { + return autoConvert_v1_ImageLabel_To_api_ImageLabel(in, out, s) +} + +func autoConvert_api_ImageLabel_To_v1_ImageLabel(in *api.ImageLabel, out *ImageLabel, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_ImageLabel_To_v1_ImageLabel(in *api.ImageLabel, out *ImageLabel, s conversion.Scope) error { + return autoConvert_api_ImageLabel_To_v1_ImageLabel(in, out, s) +} + func autoConvert_v1_ImageSource_To_api_ImageSource(in *ImageSource, out *api.ImageSource, s conversion.Scope) error { if err := api_v1.Convert_v1_ObjectReference_To_api_ObjectReference(&in.From, &out.From, s); err != nil { return err @@ -1736,6 +1802,28 @@ func Convert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy return autoConvert_api_JenkinsPipelineBuildStrategy_To_v1_JenkinsPipelineBuildStrategy(in, out, s) } +func autoConvert_v1_ProxyConfig_To_api_ProxyConfig(in *ProxyConfig, out *api.ProxyConfig, s conversion.Scope) error { + out.HTTPProxy = in.HTTPProxy + out.HTTPSProxy = in.HTTPSProxy + out.NoProxy = in.NoProxy + return nil +} + +func Convert_v1_ProxyConfig_To_api_ProxyConfig(in *ProxyConfig, out *api.ProxyConfig, s conversion.Scope) error { + return autoConvert_v1_ProxyConfig_To_api_ProxyConfig(in, out, s) +} + +func autoConvert_api_ProxyConfig_To_v1_ProxyConfig(in *api.ProxyConfig, out *ProxyConfig, s conversion.Scope) error { + out.HTTPProxy = in.HTTPProxy + out.HTTPSProxy = in.HTTPSProxy + out.NoProxy = in.NoProxy + return nil +} + +func Convert_api_ProxyConfig_To_v1_ProxyConfig(in *api.ProxyConfig, out *ProxyConfig, s conversion.Scope) error { + return autoConvert_api_ProxyConfig_To_v1_ProxyConfig(in, out, s) +} + func autoConvert_v1_SecretBuildSource_To_api_SecretBuildSource(in *SecretBuildSource, out *api.SecretBuildSource, s conversion.Scope) error { if err := api_v1.Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.Secret, &out.Secret, s); err != nil { return err diff --git a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go index 52189dfa..8182d989 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go 
+++ b/vendor/github.com/openshift/origin/pkg/build/api/v1/zz_generated.deepcopy.go @@ -50,9 +50,11 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitSourceRevision, InType: reflect.TypeOf(&GitSourceRevision{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageChangeCause, InType: reflect.TypeOf(&ImageChangeCause{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageChangeTrigger, InType: reflect.TypeOf(&ImageChangeTrigger{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageLabel, InType: reflect.TypeOf(&ImageLabel{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageSource, InType: reflect.TypeOf(&ImageSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ImageSourcePath, InType: reflect.TypeOf(&ImageSourcePath{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JenkinsPipelineBuildStrategy, InType: reflect.TypeOf(&JenkinsPipelineBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProxyConfig, InType: reflect.TypeOf(&ProxyConfig{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretBuildSource, InType: reflect.TypeOf(&SecretBuildSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretSpec, InType: reflect.TypeOf(&SecretSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SourceBuildStrategy, InType: reflect.TypeOf(&SourceBuildStrategy{})}, @@ -274,6 +276,15 @@ func DeepCopy_v1_BuildOutput(in interface{}, out interface{}, c *conversion.Clon } else { out.PushSecret = nil } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } else { + out.ImageLabels = nil + } return nil } } @@ -636,6 +647,15 @@ func DeepCopy_v1_CommonSpec(in interface{}, out interface{}, c *conversion.Clone } else { out.CompletionDeadlineSeconds = nil } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.NodeSelector = nil + } return nil } } @@ -768,19 +788,8 @@ func DeepCopy_v1_GitBuildSource(in interface{}, out interface{}, c *conversion.C out := out.(*GitBuildSource) out.URI = in.URI out.Ref = in.Ref - if in.HTTPProxy != nil { - in, out := &in.HTTPProxy, &out.HTTPProxy - *out = new(string) - **out = **in - } else { - out.HTTPProxy = nil - } - if in.HTTPSProxy != nil { - in, out := &in.HTTPSProxy, &out.HTTPSProxy - *out = new(string) - **out = **in - } else { - out.HTTPSProxy = nil + if err := DeepCopy_v1_ProxyConfig(&in.ProxyConfig, &out.ProxyConfig, c); err != nil { + return err } return nil } @@ -860,6 +869,16 @@ func DeepCopy_v1_ImageChangeTrigger(in interface{}, out interface{}, c *conversi } } +func DeepCopy_v1_ImageLabel(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageLabel) + out := out.(*ImageLabel) + out.Name = in.Name + out.Value = in.Value + return nil + } +} + func DeepCopy_v1_ImageSource(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*ImageSource) @@ -905,6 +924,35 @@ func DeepCopy_v1_JenkinsPipelineBuildStrategy(in interface{}, out interface{}, c } } +func DeepCopy_v1_ProxyConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProxyConfig) + out := out.(*ProxyConfig) + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } else { + out.HTTPProxy = nil + } + if in.HTTPSProxy != nil { 
+ in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } else { + out.HTTPSProxy = nil + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } else { + out.NoProxy = nil + } + return nil + } +} + func DeepCopy_v1_SecretBuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SecretBuildSource) diff --git a/vendor/github.com/openshift/origin/pkg/build/client/clients.go b/vendor/github.com/openshift/origin/pkg/build/client/clients.go new file mode 100644 index 00000000..00d70144 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/client/clients.go @@ -0,0 +1,109 @@ +package client + +import ( + buildapi "github.com/openshift/origin/pkg/build/api" + osclient "github.com/openshift/origin/pkg/client" + kapi "k8s.io/kubernetes/pkg/api" +) + +// BuildConfigGetter provides methods for getting BuildConfigs +type BuildConfigGetter interface { + Get(namespace, name string) (*buildapi.BuildConfig, error) +} + +// BuildConfigUpdater provides methods for updating BuildConfigs +type BuildConfigUpdater interface { + Update(buildConfig *buildapi.BuildConfig) error +} + +// OSClientBuildConfigClient delegates get and update operations to the OpenShift client interface +type OSClientBuildConfigClient struct { + Client osclient.Interface +} + +// NewOSClientBuildConfigClient creates a new build config client that uses an openshift client to create and get BuildConfigs +func NewOSClientBuildConfigClient(client osclient.Interface) *OSClientBuildConfigClient { + return &OSClientBuildConfigClient{Client: client} +} + +// Get returns a BuildConfig using the OpenShift client. +func (c OSClientBuildConfigClient) Get(namespace, name string) (*buildapi.BuildConfig, error) { + return c.Client.BuildConfigs(namespace).Get(name) +} + +// Update updates a BuildConfig using the OpenShift client. +func (c OSClientBuildConfigClient) Update(buildConfig *buildapi.BuildConfig) error { + _, err := c.Client.BuildConfigs(buildConfig.Namespace).Update(buildConfig) + return err +} + +// BuildUpdater provides methods for updating existing Builds. +type BuildUpdater interface { + Update(namespace string, build *buildapi.Build) error +} + +// BuildLister provides methods for listing the Builds. +type BuildLister interface { + List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error) +} + +// OSClientBuildClient deletes build create and update operations to the OpenShift client interface +type OSClientBuildClient struct { + Client osclient.Interface +} + +// NewOSClientBuildClient creates a new build client that uses an openshift client to update builds +func NewOSClientBuildClient(client osclient.Interface) *OSClientBuildClient { + return &OSClientBuildClient{Client: client} +} + +// Update updates builds using the OpenShift client. +func (c OSClientBuildClient) Update(namespace string, build *buildapi.Build) error { + _, e := c.Client.Builds(namespace).Update(build) + return e +} + +// List lists the builds using the OpenShift client. 
+func (c OSClientBuildClient) List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error) { + return c.Client.Builds(namespace).List(opts) +} + +// BuildCloner provides methods for cloning builds +type BuildCloner interface { + Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) +} + +// OSClientBuildClonerClient creates a new build client that uses an openshift client to clone builds +type OSClientBuildClonerClient struct { + Client osclient.Interface +} + +// NewOSClientBuildClonerClient creates a new build client that uses an openshift client to clone builds +func NewOSClientBuildClonerClient(client osclient.Interface) *OSClientBuildClonerClient { + return &OSClientBuildClonerClient{Client: client} +} + +// Clone generates new build for given build name +func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) { + return c.Client.Builds(namespace).Clone(request) +} + +// BuildConfigInstantiator provides methods for instantiating builds from build configs +type BuildConfigInstantiator interface { + Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) +} + +// OSClientBuildConfigInstantiatorClient creates a new build client that uses an openshift client to create builds +type OSClientBuildConfigInstantiatorClient struct { + Client osclient.Interface +} + +// NewOSClientBuildConfigInstantiatorClient creates a new build client that uses an openshift client to create builds +func NewOSClientBuildConfigInstantiatorClient(client osclient.Interface) *OSClientBuildConfigInstantiatorClient { + return &OSClientBuildConfigInstantiatorClient{Client: client} +} + +// Instantiate generates new build for given buildConfig +func (c OSClientBuildConfigInstantiatorClient) Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) { + return c.Client.BuildConfigs(namespace).Instantiate(request) +} diff --git a/vendor/github.com/openshift/origin/pkg/build/util/doc.go b/vendor/github.com/openshift/origin/pkg/build/util/doc.go new file mode 100644 index 00000000..07e585bb --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/util/doc.go @@ -0,0 +1,3 @@ +// Package util contains common functions that are used +// by the rest of the OpenShift build system. +package util diff --git a/vendor/github.com/openshift/origin/pkg/build/util/util.go b/vendor/github.com/openshift/origin/pkg/build/util/util.go new file mode 100644 index 00000000..270ff52a --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/build/util/util.go @@ -0,0 +1,152 @@ +package util + +import ( + "fmt" + "strconv" + "strings" + + kapi "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/labels" + + "github.com/golang/glog" + buildapi "github.com/openshift/origin/pkg/build/api" + buildclient "github.com/openshift/origin/pkg/build/client" +) + +const ( + // NoBuildLogsMessage reports that no build logs are available + NoBuildLogsMessage = "No logs are available." +) + +// GetBuildName returns name of the build pod. +func GetBuildName(pod *kapi.Pod) string { + if pod == nil { + return "" + } + return pod.Annotations[buildapi.BuildAnnotation] +} + +// GetInputReference returns the From ObjectReference associated with the +// BuildStrategy. 
+func GetInputReference(strategy buildapi.BuildStrategy) *kapi.ObjectReference { + switch { + case strategy.SourceStrategy != nil: + return &strategy.SourceStrategy.From + case strategy.DockerStrategy != nil: + return strategy.DockerStrategy.From + case strategy.CustomStrategy != nil: + return &strategy.CustomStrategy.From + default: + return nil + } +} + +// IsBuildComplete returns whether the provided build is complete or not +func IsBuildComplete(build *buildapi.Build) bool { + return build.Status.Phase != buildapi.BuildPhaseRunning && build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew +} + +// IsPaused returns true if the provided BuildConfig is paused and cannot be used to create a new Build +func IsPaused(bc *buildapi.BuildConfig) bool { + return strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" +} + +// BuildNumber returns the given build number. +func BuildNumber(build *buildapi.Build) (int64, error) { + annotations := build.GetAnnotations() + if stringNumber, ok := annotations[buildapi.BuildNumberAnnotation]; ok { + return strconv.ParseInt(stringNumber, 10, 64) + } + return 0, fmt.Errorf("build %s/%s does not have %s annotation", build.Namespace, build.Name, buildapi.BuildNumberAnnotation) +} + +// BuildRunPolicy returns the scheduling policy for the build based on the +// "queued" label. +func BuildRunPolicy(build *buildapi.Build) buildapi.BuildRunPolicy { + labels := build.GetLabels() + if value, found := labels[buildapi.BuildRunPolicyLabel]; found { + switch value { + case "Parallel": + return buildapi.BuildRunPolicyParallel + case "Serial": + return buildapi.BuildRunPolicySerial + case "SerialLatestOnly": + return buildapi.BuildRunPolicySerialLatestOnly + } + } + glog.V(5).Infof("Build %s/%s does not have start policy label set, using default (Serial)") + return buildapi.BuildRunPolicySerial +} + +// BuildNameForConfigVersion returns the name of the version-th build +// for the config that has the provided name. +func BuildNameForConfigVersion(name string, version int) string { + return fmt.Sprintf("%s-%d", name, version) +} + +// BuildConfigSelector returns a label Selector which can be used to find all +// builds for a BuildConfig. +func BuildConfigSelector(name string) labels.Selector { + return labels.Set{buildapi.BuildConfigLabel: buildapi.LabelValue(name)}.AsSelector() +} + +// BuildConfigSelectorDeprecated returns a label Selector which can be used to find +// all builds for a BuildConfig that use the deprecated labels. +func BuildConfigSelectorDeprecated(name string) labels.Selector { + return labels.Set{buildapi.BuildConfigLabelDeprecated: name}.AsSelector() +} + +type buildFilter func(buildapi.Build) bool + +// BuildConfigBuilds return a list of builds for the given build config. +// Optionally you can specify a filter function to select only builds that +// matches your criteria. 
+func BuildConfigBuilds(c buildclient.BuildLister, namespace, name string, filterFunc buildFilter) (*buildapi.BuildList, error) { + result, err := c.List(namespace, kapi.ListOptions{ + LabelSelector: BuildConfigSelector(name), + }) + if err != nil { + return nil, err + } + if filterFunc == nil { + return result, nil + } + filteredList := &buildapi.BuildList{TypeMeta: result.TypeMeta, ListMeta: result.ListMeta} + for _, b := range result.Items { + if filterFunc(b) { + filteredList.Items = append(filteredList.Items, b) + } + } + return filteredList, nil +} + +// ConfigNameForBuild returns the name of the build config from a +// build name. +func ConfigNameForBuild(build *buildapi.Build) string { + if build == nil { + return "" + } + if build.Annotations != nil { + if _, exists := build.Annotations[buildapi.BuildConfigAnnotation]; exists { + return build.Annotations[buildapi.BuildConfigAnnotation] + } + } + if _, exists := build.Labels[buildapi.BuildConfigLabel]; exists { + return build.Labels[buildapi.BuildConfigLabel] + } + return build.Labels[buildapi.BuildConfigLabelDeprecated] +} + +// VersionForBuild returns the version from the provided build name. +// If no version can be found, 0 is returned to indicate no version. +func VersionForBuild(build *buildapi.Build) int { + if build == nil { + return 0 + } + versionString := build.Annotations[buildapi.BuildNumberAnnotation] + version, err := strconv.Atoi(versionString) + if err != nil { + return 0 + } + return version +} From 44f94cc072dbc96aa75c2fdf694f9bfd60d1e896 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 26 Dec 2016 17:34:19 +0530 Subject: [PATCH 30/33] Fixes/updates based on review. --- pkg/testutils/git.go | 8 ++++---- pkg/transformer/openshift/openshift_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/testutils/git.go b/pkg/testutils/git.go index 158f9932..33316402 100644 --- a/pkg/testutils/git.go +++ b/pkg/testutils/git.go @@ -29,18 +29,18 @@ func CreateLocalGitDirectory(t *testing.T) string { cmd.Dir = dir _, err := cmd.Output() if err != nil { - fmt.Println("create local git dir", err) + t.Logf("create local git dir: %v", err) t.Fatal(err) } return dir } func SetGitRemote(t *testing.T, dir string, remote string, remoteUrl string) { - cmd := NewCommand("git remote add newremote https://git.test.com/somerepo") + cmd := NewCommand(fmt.Sprintf("git remote add %s %s", remote, remoteUrl)) cmd.Dir = dir _, err := cmd.Output() if err != nil { - fmt.Println("set git remote", err) + t.Logf("set git remote: %v", err) t.Fatal(err) } } @@ -55,7 +55,7 @@ func CreateGitRemoteBranch(t *testing.T, dir string, branch string, remote strin _, err := cmd.Output() if err != nil { - fmt.Println("create git branch", err) + t.Logf("create git branch: %v", err) t.Fatal(err) } } diff --git a/pkg/transformer/openshift/openshift_test.go b/pkg/transformer/openshift/openshift_test.go index 6b9e5720..322863ea 100644 --- a/pkg/transformer/openshift/openshift_test.go +++ b/pkg/transformer/openshift/openshift_test.go @@ -194,7 +194,7 @@ func TestGitGetCurrentBranch(t *testing.T) { if test.expectError { if err == nil { - t.Errorf("Expected error, got success instead!") + t.Error("Expected error, got success instead!") } } else { if err != nil { From 4d3347d3004658205bafd0b527e6627aab838ff3 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Mon, 26 Dec 2016 18:53:32 +0530 Subject: [PATCH 31/33] Remove dangling cli dir after rebase. 
--- cli/command/command.go | 272 ----------------------------------------- 1 file changed, 272 deletions(-) delete mode 100644 cli/command/command.go diff --git a/cli/command/command.go b/cli/command/command.go deleted file mode 100644 index f2b8ba93..00000000 --- a/cli/command/command.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package command - -import ( - "fmt" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/kubernetes-incubator/kompose/cli/app" - "github.com/urfave/cli" -) - -// Hook for erroring and exit out on warning -type errorOnWarningHook struct{} - -// array consisting of our common conversion flags that will get passed along -// for the autocomplete aspect -var ( - commonConvertFlagsList = []string{"out", "replicas", "yaml", "stdout", "emptyvols"} -) - -func (errorOnWarningHook) Levels() []logrus.Level { - return []logrus.Level{logrus.WarnLevel} -} - -func (errorOnWarningHook) Fire(entry *logrus.Entry) error { - logrus.Fatalln(entry.Message) - return nil -} - -// BeforeApp is an action that is executed before any cli command. -func BeforeApp(c *cli.Context) error { - - if c.GlobalBool("verbose") { - logrus.SetLevel(logrus.DebugLevel) - } else if c.GlobalBool("suppress-warnings") { - logrus.SetLevel(logrus.ErrorLevel) - } else if c.GlobalBool("error-on-warning") { - hook := errorOnWarningHook{} - logrus.AddHook(hook) - } - - // First command added was dummy convert command so removing it - c.App.Commands = c.App.Commands[1:] - provider := strings.ToLower(c.GlobalString("provider")) - switch provider { - case "kubernetes": - c.App.Commands = append(c.App.Commands, ConvertKubernetesCommand()) - case "openshift": - c.App.Commands = append(c.App.Commands, ConvertOpenShiftCommand()) - default: - logrus.Fatalf("Unknown provider. Supported providers are kubernetes and openshift.") - } - - return nil -} - -// When user tries out `kompose -h`, the convert option should be visible -// so adding a dummy `convert` command, real convert commands depending on Providers -// mentioned are added in `BeforeApp` function -func ConvertCommandDummy() cli.Command { - command := cli.Command{ - Name: "convert", - Usage: fmt.Sprintf("Convert Docker Compose file (e.g. %s) to Kubernetes/OpenShift objects", app.DefaultComposeFile), - } - return command -} - -// Generate the Bash completion flag taking the common flags plus whatever is -// passed into the function to correspond to the primary command specific args -func generateBashCompletion(args []string) { - commonArgs := []string{"bundle", "file", "suppress-warnings", "verbose", "error-on-warning", "provider"} - flags := append(commonArgs, args...) - - for _, f := range flags { - fmt.Printf("--%s\n", f) - } -} - -// ConvertKubernetesCommand defines the kompose convert subcommand for Kubernetes provider -func ConvertKubernetesCommand() cli.Command { - command := cli.Command{ - Name: "convert", - Usage: fmt.Sprintf("Convert Docker Compose file (e.g. 
%s) to Kubernetes objects", app.DefaultComposeFile), - Action: func(c *cli.Context) { - app.Convert(c) - }, - BashComplete: func(c *cli.Context) { - flags := []string{"chart", "deployment", "daemonset", "replicationcontroller"} - generateBashCompletion(append(flags, commonConvertFlagsList...)) - }, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "chart,c", - Usage: "Create a Helm chart for converted objects", - }, - cli.BoolFlag{ - Name: "deployment,d", - Usage: "Generate a Kubernetes deployment object (default on)", - }, - cli.BoolFlag{ - Name: "daemonset,ds", - Usage: "Generate a Kubernetes daemonset object", - }, - cli.BoolFlag{ - Name: "replicationcontroller,rc", - Usage: "Generate a Kubernetes replication controller object", - }, - }, - } - command.Flags = append(command.Flags, commonConvertFlags()...) - return command -} - -// ConvertOpenShiftCommand defines the kompose convert subcommand for OpenShift provider -func ConvertOpenShiftCommand() cli.Command { - command := cli.Command{ - Name: "convert", - Usage: fmt.Sprintf("Convert Docker Compose file (e.g. %s) to OpenShift objects", app.DefaultComposeFile), - Action: func(c *cli.Context) { - app.Convert(c) - }, - BashComplete: func(c *cli.Context) { - flags := []string{"deploymentconfig"} - generateBashCompletion(append(flags, commonConvertFlagsList...)) - }, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "deploymentconfig,dc", - Usage: "Generate a OpenShift DeploymentConfig object", - }, - cli.StringFlag{ - Name: "build-repo", - Value: "", - Usage: "Specify source repository for buildconfig (default remote origin)", - EnvVar: "BUILDREPO", - }, - cli.StringFlag{ - Name: "build-branch", - Value: "", - Usage: "Specify repository branch to use for buildconfig (default master)", - EnvVar: "BUILDBRANCH", - }, - }, - } - command.Flags = append(command.Flags, commonConvertFlags()...) - return command -} - -func commonConvertFlags() []cli.Flag { - return []cli.Flag{ - cli.StringFlag{ - Name: "out,o", - Usage: "Specify path to a file or a directory to save generated objects into. If path is a directory, the objects are stored in that directory. If path is a file, then objects are stored in that single file. File is created if it does not exist.", - EnvVar: "OUTPUT_FILE", - }, - cli.IntFlag{ - Name: "replicas", - Value: 1, - Usage: "Specify the number of replicas in the generated resource spec (default 1)", - }, - cli.BoolFlag{ - Name: "yaml, y", - Usage: "Generate resource file in yaml format", - }, - cli.BoolFlag{ - Name: "stdout", - Usage: "Print converted objects to stdout", - }, - cli.BoolFlag{ - Name: "emptyvols", - Usage: "Use Empty Volumes. Don't generate PVCs", - }, - } -} - -// UpCommand defines the kompose up subcommand. -func UpCommand() cli.Command { - return cli.Command{ - Name: "up", - Usage: "Deploy your Dockerized application to Kubernetes (default: creating Kubernetes deployment and service)", - Action: func(c *cli.Context) { - app.Up(c) - }, - BashComplete: func(c *cli.Context) { - flags := []string{"emptyvols"} - generateBashCompletion(flags) - }, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "emptyvols", - Usage: "Use Empty Volumes. Don't generate PVCs", - }, - }, - } -} - -// DownCommand defines the kompose down subcommand. 
-func DownCommand() cli.Command { - return cli.Command{ - Name: "down", - Usage: "Delete instantiated services/deployments from kubernetes", - Action: func(c *cli.Context) { - app.Down(c) - }, - BashComplete: func(c *cli.Context) { - flags := []string{"emptyvols"} - generateBashCompletion(flags) - }, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "emptyvols", - Usage: "Use Empty Volumes. Don't generate PVCs", - }, - }, - } -} - -// CommonFlags defines the flags that are in common for all subcommands. -func CommonFlags() []cli.Flag { - return []cli.Flag{ - cli.StringFlag{ - Name: "bundle,dab", - Usage: "Specify a Distributed Application Bundle (DAB) file", - EnvVar: "DAB_FILE", - }, - - cli.StringFlag{ - Name: "file,f", - Usage: fmt.Sprintf("Specify an alternative compose file (default: %s)", app.DefaultComposeFile), - Value: app.DefaultComposeFile, - EnvVar: "COMPOSE_FILE", - }, - // creating a flag to suppress warnings - cli.BoolFlag{ - Name: "suppress-warnings", - Usage: "Suppress all warnings", - }, - // creating a flag to show all kinds of warnings - cli.BoolFlag{ - Name: "verbose", - Usage: "Show all type of logs", - }, - // flag to treat any warning as error - cli.BoolFlag{ - Name: "error-on-warning", - Usage: "Treat any warning as error", - }, - // mention the end provider - cli.StringFlag{ - Name: "provider", - Usage: "Generate artifacts for this provider", - Value: app.DefaultProvider, - EnvVar: "PROVIDER", - }, - } -} From 7c959b65d4dbee082bd2fd7bed3bc801efcdff84 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 27 Dec 2016 20:07:03 +0530 Subject: [PATCH 32/33] Fixed typos in openshift buildconfig - spelling mistake - pass compose file dir instead of compose file to initBuildConfig call - Use as default value for cli --build-branch option - Pass current build branch to buildconfig related functions instead of opt.BuildBranch - Fix printing buildconfig source branch in logs. 
--- cmd/convert.go | 2 +- pkg/transformer/openshift/openshift.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/convert.go b/cmd/convert.go index 7d59f091..73cc43ec 100644 --- a/cmd/convert.go +++ b/cmd/convert.go @@ -88,7 +88,7 @@ func init() { convertCmd.Flags().MarkHidden("deployment-config") convertCmd.Flags().StringVar(&ConvertBuildRepo, "build-repo", "", "Specify source repository for buildconfig (default remote origin)") convertCmd.Flags().MarkHidden("build-repo") - convertCmd.Flags().StringVar(&ConvertBuildBranch, "build-branch", "master", "Specify repository branch to use for buildconfig (default master)") + convertCmd.Flags().StringVar(&ConvertBuildBranch, "build-branch", "", "Specify repository branch to use for buildconfig (default master)") convertCmd.Flags().MarkHidden("build-branch") // Standard between the two diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 5390ffce..27c6543f 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -170,7 +170,7 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) func initBuildConfig(name string, service kobject.ServiceConfig, composeFileDir string, repo string, branch string) *buildapi.BuildConfig { contextDir, err := getAbsBuildContext(service.Build, composeFileDir) if err != nil { - logrus.Fatalf("[%s] Buildconfig cannote be created due to error in creating build context.", name) + logrus.Fatalf("[%s] Buildconfig cannot be created due to error in creating build context.", name) } bc := &buildapi.BuildConfig{ @@ -356,7 +356,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } hasBuild = true } - objects = append(objects, initBuildConfig(name, service, opt.InputFile, buildRepo, opt.BuildBranch)) // Openshift BuildConfigs + objects = append(objects, initBuildConfig(name, service, composeFileDir, buildRepo, buildBranch)) // Openshift BuildConfigs } // If ports not provided in configuration we will not make service @@ -375,7 +375,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C } if hasBuild { - logrus.Infof("Buildconfig using %s::%s as source.", buildRepo, opt.BuildBranch) + logrus.Infof("Buildconfig using %s::%s as source.", buildRepo, buildBranch) } // If docker-compose has a volumes_from directive it will be handled here o.VolumesFrom(&allobjects, komposeObject) From 0d86f3e08754e8165ef2ae6d0306156ef84fe4d0 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 27 Dec 2016 20:22:49 +0530 Subject: [PATCH 33/33] Updated docs for openshift buildconfig feature - Added buildconfig doc in user guide. - Add inline code documentation to explain why buildconfig object needs to be created after imagestream, because of https://github.com/openshift/origin/issues/4518 --- docs/user-guide.md | 17 +++++++++++++++-- pkg/transformer/openshift/openshift.go | 1 + 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/docs/user-guide.md b/docs/user-guide.md index baaaffbf..f61e09a6 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -95,7 +95,7 @@ INFO[0000] file "result-imagestream.json" created ``` In similar way you can convert DAB files to OpenShift. 
-```console$ +```console $ kompose --bundle docker-compose-bundle.dab --provider openshift convert WARN[0000]: Unsupported key networks - ignoring INFO[0000] file "redis-svc.json" created @@ -106,6 +106,19 @@ INFO[0000] file "redis-deploymentconfig.json" created INFO[0000] file "redis-imagestream.json" created ``` +It also supports creating buildconfig for build directive in a service. By default, it uses the remote repo for the current git branch as the source repo, and the current branch as the source branch for the build. You can specify a different source repo and branch using ``--build-repo`` and ``--build-branch`` options respectively. + +```console +kompose --provider openshift --file buildconfig/docker-compose.yml convert +WARN[0000] [foo] Service cannot be created because of missing port. +INFO[0000] Buildconfig using git@github.com:rtnpro/kompose.git::master as source. +INFO[0000] file "foo-deploymentconfig.json" created +INFO[0000] file "foo-imagestream.json" created +INFO[0000] file "foo-buildconfig.json" created +``` + +**Note**: If you are manually pushing the Openshift artifacts using ``oc create -f``, you need to ensure that you push the imagestream artifact before the buildconfig artifact, to workaround this Openshift issue: https://github.com/openshift/origin/issues/4518 . + ## Kompose up Kompose supports a straightforward way to deploy your "composed" application to Kubernetes or OpenShift via `kompose up`. @@ -354,4 +367,4 @@ services: mariadb: image: centos/mariadb restart: "no" -``` \ No newline at end of file +``` diff --git a/pkg/transformer/openshift/openshift.go b/pkg/transformer/openshift/openshift.go index 27c6543f..9ba75633 100644 --- a/pkg/transformer/openshift/openshift.go +++ b/pkg/transformer/openshift/openshift.go @@ -329,6 +329,7 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C objects = append(objects, o.initImageStream(name, service)) } + // buildconfig needs to be added to objects after imagestream because of this Openshift bug: https://github.com/openshift/origin/issues/4518 if service.Build != "" { if !hasBuild { composeFileDir, err = getComposeFileDir(opt.InputFile)
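
As a concrete illustration of the ordering note added to the user guide above: when pushing the generated artifacts by hand with `oc create -f`, the imagestream file has to be created before the buildconfig file. A minimal sketch only, reusing the `foo` service file names from the example conversion output in the guide (it assumes `oc` is already logged in to the target project):

```console
$ oc create -f foo-imagestream.json
$ oc create -f foo-buildconfig.json
```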