Merge pull request #206 from rtnpro/buildconfig

Generate buildconfig for Openshift
This commit is contained in:
Suraj Deshmukh 2016-12-28 21:08:38 +05:30 committed by GitHub
commit 9c3fdaa48d
29 changed files with 16356 additions and 155 deletions

View File

@ -26,12 +26,12 @@ import (
)
var (
ConvertSource, ConvertOut string
ConvertChart, ConvertDeployment, ConvertDaemonSet bool
ConvertReplicationController, ConvertYaml, ConvertStdout bool
ConvertEmptyVols, ConvertDeploymentConfig bool
ConvertReplicas int
ConvertOpt kobject.ConvertOptions
ConvertSource, ConvertOut, ConvertBuildRepo, ConvertBuildBranch string
ConvertChart, ConvertDeployment, ConvertDaemonSet bool
ConvertReplicationController, ConvertYaml, ConvertStdout bool
ConvertEmptyVols, ConvertDeploymentConfig, ConvertBuildConfig bool
ConvertReplicas int
ConvertOpt kobject.ConvertOptions
)
var ConvertProvider string = GlobalProvider
@ -53,6 +53,8 @@ var convertCmd = &cobra.Command{
CreateD: ConvertDeployment,
CreateDS: ConvertDaemonSet,
CreateRC: ConvertReplicationController,
BuildRepo: ConvertBuildRepo,
BuildBranch: ConvertBuildBranch,
CreateDeploymentConfig: ConvertDeploymentConfig,
EmptyVols: ConvertEmptyVols,
}
@ -84,6 +86,10 @@ func init() {
// OpenShift only
convertCmd.Flags().BoolVar(&ConvertDeploymentConfig, "deployment-config", true, "Generate an OpenShift deploymentconfig object")
convertCmd.Flags().MarkHidden("deployment-config")
convertCmd.Flags().StringVar(&ConvertBuildRepo, "build-repo", "", "Specify source repository for buildconfig (default remote origin)")
convertCmd.Flags().MarkHidden("build-repo")
convertCmd.Flags().StringVar(&ConvertBuildBranch, "build-branch", "", "Specify repository branch to use for buildconfig (default master)")
convertCmd.Flags().MarkHidden("build-branch")
// Standard between the two
convertCmd.Flags().BoolVarP(&ConvertYaml, "yaml", "y", false, "Generate resource files into yaml format")
@ -107,6 +113,8 @@ Available Commands:{{range .Commands}}{{if .IsAvailableCommand}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}
Resource Flags:
--build-branch Specify repository branch to use for buildconfig (default is current branch name)
--build-repo Specify source repository for buildconfig (default is current branch's remote URL)
-c, --chart Create a Helm chart for converted objects
--daemon-set Generate a Kubernetes daemonset object
-d, --deployment Generate a Kubernetes deployment object

View File

@ -95,7 +95,7 @@ INFO[0000] file "result-imagestream.json" created
```
In a similar way, you can convert DAB files to OpenShift.
```console$
```console
$ kompose --bundle docker-compose-bundle.dab --provider openshift convert
WARN[0000]: Unsupported key networks - ignoring
INFO[0000] file "redis-svc.json" created
@ -106,6 +106,19 @@ INFO[0000] file "redis-deploymentconfig.json" created
INFO[0000] file "redis-imagestream.json" created
```
It also supports creating a buildconfig for the build directive in a service. By default, it uses the remote repo for the current git branch as the source repo, and the current branch as the source branch for the build. You can specify a different source repo and branch using the ``--build-repo`` and ``--build-branch`` options respectively.
```console
kompose --provider openshift --file buildconfig/docker-compose.yml convert
WARN[0000] [foo] Service cannot be created because of missing port.
INFO[0000] Buildconfig using git@github.com:rtnpro/kompose.git::master as source.
INFO[0000] file "foo-deploymentconfig.json" created
INFO[0000] file "foo-imagestream.json" created
INFO[0000] file "foo-buildconfig.json" created
```
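To override these defaults, pass the flags explicitly; the repository URL and branch below are placeholder values:
```console
kompose --provider openshift --file buildconfig/docker-compose.yml convert --build-repo https://git.example.com/myorg/myrepo.git --build-branch mybranch
```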
**Note**: If you are manually pushing the OpenShift artifacts using ``oc create -f``, you need to ensure that you push the imagestream artifact before the buildconfig artifact, to work around this OpenShift issue: https://github.com/openshift/origin/issues/4518 .
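For example, with the artifacts generated above, create the imagestream before the buildconfig:
```console
$ oc create -f foo-imagestream.json
$ oc create -f foo-buildconfig.json
$ oc create -f foo-deploymentconfig.json
```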
## Kompose up
Kompose supports a straightforward way to deploy your "composed" application to Kubernetes or OpenShift via `kompose up`.
@ -354,4 +367,4 @@ services:
mariadb:
image: centos/mariadb
restart: "no"
```
```

View File

@ -0,0 +1,5 @@
FROM busybox
RUN touch /test

View File

@ -0,0 +1,7 @@
version: "2"
services:
  foo:
    build: "./build"
    command: "sleep 3600"

glide.lock generated
View File

@ -1,5 +1,5 @@
hash: ce7cedb6d1c0e23f02afdd8bc7d1dabb047ca846cacd597e436cf93549c9ac79
updated: 2016-12-22T09:07:17.203828556-05:00
hash: c7cb14f4249738a47020f9dc1964832921c3f5b8bf5a1c50f5b2fa15eaebb6fe
updated: 2016-12-26T10:22:49.439519344+05:30
imports:
- name: cloud.google.com/go
version: 3b1ae45394a234c385be014e9a488f2bb6eef821
@ -369,6 +369,10 @@ imports:
- pkg/auth/authenticator/request/x509request
- pkg/authorization/api
- pkg/build/api
- pkg/build/api/install
- pkg/build/api/v1
- pkg/build/client
- pkg/build/util
- pkg/client
- pkg/cmd/cli/config
- pkg/cmd/util

View File

@ -20,6 +20,8 @@ import:
- package: github.com/openshift/origin
version: v1.4.0-rc1
subpackages:
- pkg/build/api/install
- pkg/build/api/v1
- pkg/client
- pkg/cmd/cli/config
- pkg/deploy/api

View File

@ -28,6 +28,7 @@ import (
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
// install OpenShift api
_ "github.com/openshift/origin/pkg/build/api/install"
_ "github.com/openshift/origin/pkg/deploy/api/install"
_ "github.com/openshift/origin/pkg/image/api/install"
_ "github.com/openshift/origin/pkg/route/api/install"
@ -62,6 +63,8 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec
// OpenShift specific flags
deploymentConfig := cmd.Flags().Lookup("deployment-config").Changed
buildRepo := cmd.Flags().Lookup("build-repo").Changed
buildBranch := cmd.Flags().Lookup("build-branch").Changed
// Kubernetes specific flags
chart := cmd.Flags().Lookup("chart").Changed
@ -88,6 +91,12 @@ func ValidateFlags(bundle string, args []string, cmd *cobra.Command, opt *kobjec
if deploymentConfig {
logrus.Fatalf("--deployment-config is an OpenShift only flag")
}
if buildRepo {
logrus.Fatalf("--build-repo is an Openshift only flag")
}
if buildBranch {
logrus.Fatalf("--build-branch is an Openshift only flag")
}
}
// Standard checks regardless of provider

View File

@ -33,6 +33,8 @@ type ConvertOptions struct {
CreateRC bool
CreateDS bool
CreateDeploymentConfig bool
BuildRepo string
BuildBranch string
CreateChart bool
GenerateYaml bool
EmptyVols bool

View File

@ -270,6 +270,7 @@ func (c *Compose) LoadFile(file string) kobject.KomposeObject {
if composeServiceConfig, ok := composeObject.ServiceConfigs.Get(name); ok {
serviceConfig := kobject.ServiceConfig{}
serviceConfig.Image = composeServiceConfig.Image
serviceConfig.Build = composeServiceConfig.Build.Context
serviceConfig.ContainerName = composeServiceConfig.ContainerName
serviceConfig.Command = composeServiceConfig.Entrypoint
serviceConfig.Args = composeServiceConfig.Command

pkg/testutils/git.go Normal file
View File

@ -0,0 +1,71 @@
package testutils
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"testing"
)
func NewCommand(cmd string) *exec.Cmd {
return exec.Command("sh", "-c", cmd)
}
func CreateLocalDirectory(t *testing.T) string {
dir, err := ioutil.TempDir(os.TempDir(), "kompose-test-")
if err != nil {
t.Fatal(err)
}
return dir
}
func CreateLocalGitDirectory(t *testing.T) string {
dir := CreateLocalDirectory(t)
cmd := NewCommand(
`git init && touch README &&
git add README &&
git commit -m 'testcommit'`)
cmd.Dir = dir
_, err := cmd.Output()
if err != nil {
t.Logf("create local git dir: %v", err)
t.Fatal(err)
}
return dir
}
func SetGitRemote(t *testing.T, dir string, remote string, remoteUrl string) {
cmd := NewCommand(fmt.Sprintf("git remote add %s %s", remote, remoteUrl))
cmd.Dir = dir
_, err := cmd.Output()
if err != nil {
t.Logf("set git remote: %v", err)
t.Fatal(err)
}
}
func CreateGitRemoteBranch(t *testing.T, dir string, branch string, remote string) {
cmd := NewCommand(
fmt.Sprintf(`git checkout -b %s &&
git config branch.%s.remote %s &&
git config branch.%s.merge refs/heads/%s`,
branch, branch, remote, branch, branch))
cmd.Dir = dir
_, err := cmd.Output()
if err != nil {
t.Logf("create git branch: %v", err)
t.Fatal(err)
}
}
func CreateSubdir(t *testing.T, dir string, subdir string) {
cmd := NewCommand(fmt.Sprintf("mkdir -p %s", subdir))
cmd.Dir = dir
_, err := cmd.Output()
if err != nil {
t.Fatal(err)
}
}

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/runtime"
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
imageapi "github.com/openshift/origin/pkg/image/api"
routeapi "github.com/openshift/origin/pkg/route/api"
@ -211,6 +212,8 @@ func PrintList(objects []runtime.Object, opt kobject.ConvertOptions) error {
file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f)
case *deployapi.DeploymentConfig:
file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f)
case *buildapi.BuildConfig:
file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f)
case *imageapi.ImageStream:
file = transformer.Print(t.Name, dirName, strings.ToLower(t.Kind), data, opt.ToStdout, opt.GenerateYaml, f)
case *api.Service:

View File

@ -27,6 +27,7 @@ import (
"github.com/fatih/structs"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"github.com/kubernetes-incubator/kompose/pkg/transformer"
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
// install kubernetes api
@ -512,6 +513,8 @@ func (k *Kubernetes) UpdateController(obj runtime.Object, updateTemplate func(*a
updateTemplate(&p)
t.Spec = p.Spec
t.ObjectMeta = p.ObjectMeta
case *buildapi.BuildConfig:
updateMeta(&t.ObjectMeta)
}
}

View File

@ -18,6 +18,9 @@ package openshift
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
@ -26,6 +29,7 @@ import (
"github.com/Sirupsen/logrus"
"k8s.io/kubernetes/pkg/api"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/runtime"
@ -35,6 +39,7 @@ import (
"time"
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deploymentconfigreaper "github.com/openshift/origin/pkg/deploy/cmd"
imageapi "github.com/openshift/origin/pkg/image/api"
@ -72,10 +77,80 @@ func getImageTag(image string) string {
}
}
// hasGitBinary checks if the 'git' binary is available on the system
func hasGitBinary() bool {
_, err := exec.LookPath("git")
return err == nil
}
// getGitCurrentRemoteUrl gets current git remote URI for the current git repo
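// e.g. a remote of https://git.test.com/somerepo is returned as https://git.test.com/somerepo.git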
func getGitCurrentRemoteUrl(composeFileDir string) (string, error) {
cmd := exec.Command("git", "ls-remote", "--get-url")
cmd.Dir = composeFileDir
out, err := cmd.Output()
if err != nil {
return "", err
}
url := strings.TrimRight(string(out), "\n")
if !strings.HasSuffix(url, ".git") {
url += ".git"
}
return url, nil
}
// getGitCurrentBranch gets current git branch name for the current git repo
func getGitCurrentBranch(composeFileDir string) (string, error) {
cmd := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD")
cmd.Dir = composeFileDir
out, err := cmd.Output()
if err != nil {
return "", err
}
return strings.TrimRight(string(out), "\n"), nil
}
// getComposeFileDir returns compose file directory
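// relative input paths are resolved against the current working directory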
func getComposeFileDir(inputFile string) (string, error) {
if strings.Index(inputFile, "/") != 0 {
workDir, err := os.Getwd()
if err != nil {
return "", err
}
inputFile = filepath.Join(workDir, inputFile)
}
return filepath.Dir(inputFile), nil
}
// getAbsBuildContext returns build context relative to project root dir
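// e.g. with composeFileDir "<repo>/a" and context "./b/build",
// `git rev-parse --show-prefix` prints "a/", so the returned context is "a/b/build"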
func getAbsBuildContext(context string, composeFileDir string) (string, error) {
cmd := exec.Command("git", "rev-parse", "--show-prefix")
cmd.Dir = composeFileDir
out, err := cmd.Output()
if err != nil {
return "", err
}
prefix := strings.Trim(string(out), "\n")
return filepath.Join(prefix, context), nil
}
// initImageStream initialize ImageStream object
func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig) *imageapi.ImageStream {
tag := getImageTag(service.Image)
var tags map[string]imageapi.TagReference
if service.Build == "" {
tags = map[string]imageapi.TagReference{
tag: imageapi.TagReference{
From: &api.ObjectReference{
Kind: "DockerImage",
Name: service.Image,
},
},
}
}
is := &imageapi.ImageStream{
TypeMeta: unversioned.TypeMeta{
Kind: "ImageStream",
@ -85,17 +160,54 @@ func (o *OpenShift) initImageStream(name string, service kobject.ServiceConfig)
Name: name,
},
Spec: imageapi.ImageStreamSpec{
Tags: map[string]imageapi.TagReference{
tag: imageapi.TagReference{
From: &api.ObjectReference{
Kind: "DockerImage",
Name: service.Image,
Tags: tags,
},
}
return is
}
// initBuildConfig initializes OpenShift's BuildConfig object
func initBuildConfig(name string, service kobject.ServiceConfig, composeFileDir string, repo string, branch string) *buildapi.BuildConfig {
contextDir, err := getAbsBuildContext(service.Build, composeFileDir)
if err != nil {
logrus.Fatalf("[%s] Buildconfig cannot be created due to error in creating build context.", name)
}
bc := &buildapi.BuildConfig{
TypeMeta: unversioned.TypeMeta{
Kind: "BuildConfig",
APIVersion: "v1",
},
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: buildapi.BuildConfigSpec{
Triggers: []buildapi.BuildTriggerPolicy{
{Type: "ConfigChange"},
{Type: "ImageChange"},
},
RunPolicy: "Serial",
CommonSpec: buildapi.CommonSpec{
Source: buildapi.BuildSource{
Git: &buildapi.GitBuildSource{
Ref: branch,
URI: repo,
},
ContextDir: contextDir,
},
Strategy: buildapi.BuildStrategy{
DockerStrategy: &buildapi.DockerBuildStrategy{},
},
Output: buildapi.BuildOutput{
To: &kapi.ObjectReference{
Kind: "ImageStreamTag",
Name: name + ":latest",
},
},
},
},
}
return is
return bc
}
// initDeploymentConfig initialize OpenShifts DeploymentConfig object
@ -195,6 +307,11 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C
}
// this will hold all the converted data
var allobjects []runtime.Object
var err error
var composeFileDir string
hasBuild := false
buildRepo := opt.BuildRepo
buildBranch := opt.BuildBranch
for name, service := range komposeObject.ServiceConfigs {
var objects []runtime.Object
@ -212,6 +329,37 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C
objects = append(objects, o.initImageStream(name, service))
}
// buildconfig needs to be added to objects after imagestream because of this OpenShift bug: https://github.com/openshift/origin/issues/4518
if service.Build != "" {
if !hasBuild {
composeFileDir, err = getComposeFileDir(opt.InputFile)
if err != nil {
logrus.Warningf("Error in detecting compose file's directory.")
continue
}
if !hasGitBinary() && (buildRepo == "" || buildBranch == "") {
logrus.Fatalf("Git is not installed! Please install Git to create buildconfig, else supply source repository and branch to use for build using '--build-repo', '--build-branch' options respectively")
}
if buildBranch == "" {
buildBranch, err = getGitCurrentBranch(composeFileDir)
if err != nil {
logrus.Fatalf("Buildconfig cannot be created because current git branch couldn't be detected.")
}
}
if opt.BuildRepo == "" {
buildRepo, err = getGitCurrentRemoteUrl(composeFileDir)
if err != nil {
logrus.Fatalf("Buildconfig cannot be created because git remote origin repo couldn't be detected.")
}
}
hasBuild = true
}
objects = append(objects, initBuildConfig(name, service, composeFileDir, buildRepo, buildBranch)) // Openshift BuildConfigs
}
// If ports not provided in configuration we will not make service
if o.PortsExist(name, service) {
svc := o.CreateService(name, service, objects)
@ -226,6 +374,10 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C
allobjects = append(allobjects, objects...)
}
if hasBuild {
logrus.Infof("Buildconfig using %s::%s as source.", buildRepo, buildBranch)
}
// If docker-compose has a volumes_from directive it will be handled here
o.VolumesFrom(&allobjects, komposeObject)
// sort all object so Services are first
@ -273,6 +425,12 @@ func (o *OpenShift) Deploy(komposeObject kobject.KomposeObject, opt kobject.Conv
return err
}
logrus.Infof("Successfully created ImageStream: %s", t.Name)
case *buildapi.BuildConfig:
_, err := oclient.BuildConfigs(namespace).Create(t)
if err != nil {
return err
}
logrus.Infof("Successfully created BuildConfig: %s", t.Name)
case *deployapi.DeploymentConfig:
_, err := oclient.DeploymentConfigs(namespace).Create(t)
if err != nil {

View File

@ -17,11 +17,17 @@ limitations under the License.
package openshift
import (
"github.com/kubernetes-incubator/kompose/pkg/kobject"
deployapi "github.com/openshift/origin/pkg/deploy/api"
"os"
"path/filepath"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/runtime"
"testing"
deployapi "github.com/openshift/origin/pkg/deploy/api"
"github.com/kubernetes-incubator/kompose/pkg/kobject"
"github.com/kubernetes-incubator/kompose/pkg/testutils"
)
func newServiceConfig() kobject.ServiceConfig {
@ -118,5 +124,189 @@ func TestKomposeConvertRoute(t *testing.T) {
if route.Spec.Host != sc.ExposeService {
t.Errorf("Expected %s for Spec.Host, actual %s", sc.ExposeService, route.Spec.Host)
}
}
// Test getting git remote URL for a directory
func TestGetGitRemote(t *testing.T) {
var output string
var err error
gitDir := testutils.CreateLocalGitDirectory(t)
testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo")
testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote")
dir := testutils.CreateLocalDirectory(t)
defer os.RemoveAll(gitDir)
defer os.RemoveAll(dir)
testCases := map[string]struct {
expectError bool
dir string
branch string
output string
}{
"Get git remote for branch success": {false, gitDir, "newbranch", "https://git.test.com/somerepo.git"},
"Get git remote error in non git dir": {true, dir, "", ""},
}
for name, test := range testCases {
t.Log("Test case: ", name)
output, err = getGitCurrentRemoteUrl(test.dir)
if test.expectError {
if err == nil {
t.Errorf("Expected error, got success instead!")
}
} else {
if err != nil {
t.Errorf("Expected success, got error: %v", err)
}
if output != test.output {
t.Errorf("Expected: %#v, got: %#v", test.output, output)
}
}
}
}
// Test getting current git branch in a directory
func TestGitGetCurrentBranch(t *testing.T) {
var output string
var err error
gitDir := testutils.CreateLocalGitDirectory(t)
testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo")
testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote")
dir := testutils.CreateLocalDirectory(t)
defer os.RemoveAll(gitDir)
defer os.RemoveAll(dir)
testCases := map[string]struct {
expectError bool
dir string
output string
}{
"Get git current branch success": {false, gitDir, "newbranch"},
"Get git current branch error": {true, dir, ""},
}
for name, test := range testCases {
t.Log("Test case: ", name)
output, err = getGitCurrentBranch(test.dir)
if test.expectError {
if err == nil {
t.Error("Expected error, got success instead!")
}
} else {
if err != nil {
t.Errorf("Expected success, got error: %v", err)
}
if output != test.output {
t.Errorf("Expected: %#v, got: %#v", test.output, output)
}
}
}
}
// Test getting compose file directory path: relative to project dir or absolute path
func TestGetComposeFileDir(t *testing.T) {
var output string
var err error
wd, _ := os.Getwd()
testCases := map[string]struct {
inputFile string
output string
}{
"Get compose file dir for relative input file path": {"foo/bar.yaml", filepath.Join(wd, "foo")},
"Get compose file dir for abs input file path": {"/abs/path/to/compose.yaml", "/abs/path/to"},
}
for name, test := range testCases {
t.Log("Test case: ", name)
output, err = getComposeFileDir(test.inputFile)
if err != nil {
t.Errorf("Expected success, got error: %#v", err)
}
if output != test.output {
t.Errorf("Expected output: %#v, got: %#v", test.output, output)
}
}
}
// Test getting build context relative to project's root dir
func TestGetAbsBuildContext(t *testing.T) {
var output string
var err error
gitDir := testutils.CreateLocalGitDirectory(t)
testutils.SetGitRemote(t, gitDir, "newremote", "https://git.test.com/somerepo")
testutils.CreateGitRemoteBranch(t, gitDir, "newbranch", "newremote")
testutils.CreateSubdir(t, gitDir, "a/b")
dir := testutils.CreateLocalDirectory(t)
defer os.RemoveAll(gitDir)
defer os.RemoveAll(dir)
testCases := map[string]struct {
expectError bool
context string
composeFileDir string
output string
}{
"Get abs build context success": {false, "./b/build", filepath.Join(gitDir, "a"), "a/b/build"},
"Get abs build context error": {true, "", dir, ""},
}
for name, test := range testCases {
t.Log("Test case: ", name)
output, err = getAbsBuildContext(test.context, test.composeFileDir)
if test.expectError {
if err == nil {
t.Errorf("Expected error, got success instead!")
}
} else {
if err != nil {
t.Errorf("Expected success, got error: %v", err)
}
if output != test.output {
t.Errorf("Expected: %#v, got: %#v", test.output, output)
}
}
}
}
// Test initializing buildconfig for a service
func TestInitBuildConfig(t *testing.T) {
dir := testutils.CreateLocalGitDirectory(t)
testutils.CreateSubdir(t, dir, "a/build")
defer os.RemoveAll(dir)
serviceName := "serviceA"
composeFileDir := filepath.Join(dir, "a")
repo := "https://git.test.com/org/repo"
branch := "somebranch"
sc := kobject.ServiceConfig{
Build: "./build",
}
bc := initBuildConfig(serviceName, sc, composeFileDir, repo, branch)
testCases := map[string]struct {
field string
value string
}{
"Assert buildconfig source git URI": {bc.Spec.CommonSpec.Source.Git.URI, repo},
"Assert buildconfig source git Ref": {bc.Spec.CommonSpec.Source.Git.Ref, branch},
"Assert buildconfig source context dir": {bc.Spec.CommonSpec.Source.ContextDir, "a/build"},
"Assert buildconfig output name": {bc.Spec.CommonSpec.Output.To.Name, serviceName + ":latest"},
}
for name, test := range testCases {
t.Log("Test case: ", name)
if test.field != test.value {
t.Errorf("Expected: %#v, got: %#v", test.value, test.field)
}
}
}

View File

@ -46,7 +46,7 @@ unset $(cat $KOMPOSE_ROOT/script/test/fixtures/gitlab/envs | cut -d'=' -f1)
# kubernetes test
convert::expect_success_and_warning "kompose -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-k8s.json" "Kubernetes provider doesn't support build key - ignoring"
# openshift test
convert::expect_success "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-os.json"
convert::expect_success_warning "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/docker-compose.yml convert --stdout" "$KOMPOSE_ROOT/script/test/fixtures/ngnix-node-redis/output-os.json" "Buildconfig using https://github.com/kubernetes-incubator/kompose.git::master as source."
######
# Tests related to docker-compose file in /script/test/fixtures/entrypoint-command

View File

@ -3,33 +3,6 @@
"apiVersion": "v1",
"metadata": {},
"items": [
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"service": "node2"
}
},
"spec": {
"ports": [
{
"name": "8080",
"protocol": "TCP",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"service": "node2"
}
},
"status": {
"loadBalancer": {}
}
},
{
"kind": "Service",
"apiVersion": "v1",
@ -139,7 +112,7 @@
}
},
{
"kind": "DeploymentConfig",
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "node2",
@ -149,81 +122,20 @@
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
"ports": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"node2"
],
"from": {
"kind": "ImageStreamTag",
"name": "node2:latest"
}
}
"name": "8080",
"protocol": "TCP",
"port": 8080,
"targetPort": 8080
}
],
"replicas": 1,
"test": false,
"selector": {
"service": "node2"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"service": "node2"
}
},
"spec": {
"containers": [
{
"name": "node2",
"image": " ",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage"
},
"generation": null,
"importPolicy": {}
}
]
},
"status": {
"dockerImageRepository": ""
"loadBalancer": {}
}
},
{
@ -297,23 +209,53 @@
"name": "node3",
"creationTimestamp": null
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage"
},
"generation": null,
"importPolicy": {}
}
]
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node3",
"creationTimestamp": null
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/kubernetes-incubator/kompose.git",
"ref": "master"
},
"contextDir": "script/test/fixtures/ngnix-node-redis/node"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node3:latest"
}
},
"resources": {},
"postCommit": {}
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
@ -474,23 +416,53 @@
"name": "nginx",
"creationTimestamp": null
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage"
},
"generation": null,
"importPolicy": {}
}
]
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "nginx",
"creationTimestamp": null
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/kubernetes-incubator/kompose.git",
"ref": "master"
},
"contextDir": "script/test/fixtures/ngnix-node-redis/nginx"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "nginx:latest"
}
},
"resources": {},
"postCommit": {}
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
@ -562,22 +534,170 @@
"name": "node1",
"creationTimestamp": null
},
"spec": {
"tags": [
{
"name": "latest",
"annotations": null,
"from": {
"kind": "DockerImage"
},
"generation": null,
"importPolicy": {}
}
]
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node1",
"creationTimestamp": null
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/kubernetes-incubator/kompose.git",
"ref": "master"
},
"contextDir": "script/test/fixtures/ngnix-node-redis/node"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node1:latest"
}
},
"resources": {},
"postCommit": {}
},
"status": {
"lastVersion": 0
}
},
{
"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null,
"labels": {
"service": "node2"
}
},
"spec": {
"strategy": {
"resources": {}
},
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange",
"imageChangeParams": {
"automatic": true,
"containerNames": [
"node2"
],
"from": {
"kind": "ImageStreamTag",
"name": "node2:latest"
}
}
}
],
"replicas": 1,
"test": false,
"selector": {
"service": "node2"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"service": "node2"
}
},
"spec": {
"containers": [
{
"name": "node2",
"image": " ",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {}
}
],
"restartPolicy": "Always"
}
}
},
"status": {}
},
{
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null
},
"spec": {},
"status": {
"dockerImageRepository": ""
}
},
{
"kind": "BuildConfig",
"apiVersion": "v1",
"metadata": {
"name": "node2",
"creationTimestamp": null
},
"spec": {
"triggers": [
{
"type": "ConfigChange"
},
{
"type": "ImageChange"
}
],
"runPolicy": "Serial",
"source": {
"type": "Git",
"git": {
"uri": "https://github.com/kubernetes-incubator/kompose.git",
"ref": "master"
},
"contextDir": "script/test/fixtures/ngnix-node-redis/node"
},
"strategy": {
"type": "Docker",
"dockerStrategy": {}
},
"output": {
"to": {
"kind": "ImageStreamTag",
"name": "node2:latest"
}
},
"resources": {},
"postCommit": {}
},
"status": {
"lastVersion": 0
}
}
]
}

View File

@ -0,0 +1,109 @@
package install
import (
"fmt"
"github.com/golang/glog"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/openshift/origin/pkg/build/api"
"github.com/openshift/origin/pkg/build/api/v1"
)
const importPrefix = "github.com/openshift/origin/pkg/build/api"
var accessor = meta.NewAccessor()
// availableVersions lists all known external versions for this group from most preferred to least preferred
var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion}
func init() {
registered.RegisterVersions(availableVersions)
externalVersions := []unversioned.GroupVersion{}
for _, v := range availableVersions {
if registered.IsAllowedVersion(v) {
externalVersions = append(externalVersions, v)
}
}
if len(externalVersions) == 0 {
glog.Infof("No version is registered for group %v", api.GroupName)
return
}
if err := registered.EnableVersions(externalVersions...); err != nil {
panic(err)
}
if err := enableVersions(externalVersions); err != nil {
panic(err)
}
}
// TODO: enableVersions should be centralized rather than spread in each API
// group.
// We can combine registered.RegisterVersions, registered.EnableVersions and
// registered.RegisterGroup once we have moved enableVersions there.
func enableVersions(externalVersions []unversioned.GroupVersion) error {
addVersionsToScheme(externalVersions...)
preferredExternalVersion := externalVersions[0]
groupMeta := apimachinery.GroupMeta{
GroupVersion: preferredExternalVersion,
GroupVersions: externalVersions,
RESTMapper: newRESTMapper(externalVersions),
SelfLinker: runtime.SelfLinker(accessor),
InterfacesFor: interfacesFor,
}
if err := registered.RegisterGroup(groupMeta); err != nil {
return err
}
kapi.RegisterRESTMapper(groupMeta.RESTMapper)
return nil
}
func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) {
// add the internal version to Scheme
api.AddToScheme(kapi.Scheme)
// add the enabled external versions to Scheme
for _, v := range externalVersions {
if !registered.IsEnabledVersion(v) {
glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v)
continue
}
switch v {
case v1.SchemeGroupVersion:
v1.AddToScheme(kapi.Scheme)
default:
glog.Errorf("Version %s is not known, so it will not be added to the Scheme.", v)
continue
}
}
}
func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
rootScoped := sets.NewString()
ignoredKinds := sets.NewString()
return kapi.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
}
func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
switch version {
case v1.SchemeGroupVersion:
return &meta.VersionInterfaces{
ObjectConvertor: kapi.Scheme,
MetadataAccessor: accessor,
}, nil
default:
g, _ := registered.Group(api.GroupName)
return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions)
}
}

View File

@ -0,0 +1,189 @@
package v1
import (
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
oapi "github.com/openshift/origin/pkg/api"
newer "github.com/openshift/origin/pkg/build/api"
buildutil "github.com/openshift/origin/pkg/build/util"
imageapi "github.com/openshift/origin/pkg/image/api"
)
func Convert_v1_BuildConfig_To_api_BuildConfig(in *BuildConfig, out *newer.BuildConfig, s conversion.Scope) error {
if err := autoConvert_v1_BuildConfig_To_api_BuildConfig(in, out, s); err != nil {
return err
}
newTriggers := []newer.BuildTriggerPolicy{}
// strip off any default imagechange triggers where the buildconfig's
// "from" is not an ImageStreamTag, because those triggers
// will never be invoked.
imageRef := buildutil.GetInputReference(out.Spec.Strategy)
hasIST := imageRef != nil && imageRef.Kind == "ImageStreamTag"
for _, trigger := range out.Spec.Triggers {
if trigger.Type != newer.ImageChangeBuildTriggerType {
newTriggers = append(newTriggers, trigger)
continue
}
if (trigger.ImageChange == nil || trigger.ImageChange.From == nil) && !hasIST {
continue
}
newTriggers = append(newTriggers, trigger)
}
out.Spec.Triggers = newTriggers
return nil
}
func Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(in *SourceBuildStrategy, out *newer.SourceBuildStrategy, s conversion.Scope) error {
if err := autoConvert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy(in, out, s); err != nil {
return err
}
switch in.From.Kind {
case "ImageStream":
out.From.Kind = "ImageStreamTag"
out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "")
}
return nil
}
func Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(in *DockerBuildStrategy, out *newer.DockerBuildStrategy, s conversion.Scope) error {
if err := autoConvert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy(in, out, s); err != nil {
return err
}
if in.From != nil {
switch in.From.Kind {
case "ImageStream":
out.From.Kind = "ImageStreamTag"
out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "")
}
}
return nil
}
func Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(in *CustomBuildStrategy, out *newer.CustomBuildStrategy, s conversion.Scope) error {
if err := autoConvert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy(in, out, s); err != nil {
return err
}
switch in.From.Kind {
case "ImageStream":
out.From.Kind = "ImageStreamTag"
out.From.Name = imageapi.JoinImageStreamTag(in.From.Name, "")
}
return nil
}
func Convert_v1_BuildOutput_To_api_BuildOutput(in *BuildOutput, out *newer.BuildOutput, s conversion.Scope) error {
if err := autoConvert_v1_BuildOutput_To_api_BuildOutput(in, out, s); err != nil {
return err
}
if in.To != nil && (in.To.Kind == "ImageStream" || len(in.To.Kind) == 0) {
out.To.Kind = "ImageStreamTag"
out.To.Name = imageapi.JoinImageStreamTag(in.To.Name, "")
}
return nil
}
func Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(in *BuildTriggerPolicy, out *newer.BuildTriggerPolicy, s conversion.Scope) error {
if err := autoConvert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy(in, out, s); err != nil {
return err
}
switch in.Type {
case ImageChangeBuildTriggerTypeDeprecated:
out.Type = newer.ImageChangeBuildTriggerType
case GenericWebHookBuildTriggerTypeDeprecated:
out.Type = newer.GenericWebHookBuildTriggerType
case GitHubWebHookBuildTriggerTypeDeprecated:
out.Type = newer.GitHubWebHookBuildTriggerType
}
return nil
}
func Convert_api_SourceRevision_To_v1_SourceRevision(in *newer.SourceRevision, out *SourceRevision, s conversion.Scope) error {
if err := autoConvert_api_SourceRevision_To_v1_SourceRevision(in, out, s); err != nil {
return err
}
out.Type = BuildSourceGit
return nil
}
func Convert_api_BuildSource_To_v1_BuildSource(in *newer.BuildSource, out *BuildSource, s conversion.Scope) error {
if err := autoConvert_api_BuildSource_To_v1_BuildSource(in, out, s); err != nil {
return err
}
switch {
// it is legal for a buildsource to have both a git+dockerfile source, but in v1 that was represented
// as type git.
case in.Git != nil:
out.Type = BuildSourceGit
// it is legal for a buildsource to have both a binary+dockerfile source, but in v1 that was represented
// as type binary.
case in.Binary != nil:
out.Type = BuildSourceBinary
case in.Dockerfile != nil:
out.Type = BuildSourceDockerfile
case len(in.Images) > 0:
out.Type = BuildSourceImage
default:
out.Type = BuildSourceNone
}
return nil
}
func Convert_api_BuildStrategy_To_v1_BuildStrategy(in *newer.BuildStrategy, out *BuildStrategy, s conversion.Scope) error {
if err := autoConvert_api_BuildStrategy_To_v1_BuildStrategy(in, out, s); err != nil {
return err
}
switch {
case in.SourceStrategy != nil:
out.Type = SourceBuildStrategyType
case in.DockerStrategy != nil:
out.Type = DockerBuildStrategyType
case in.CustomStrategy != nil:
out.Type = CustomBuildStrategyType
case in.JenkinsPipelineStrategy != nil:
out.Type = JenkinsPipelineBuildStrategyType
default:
out.Type = ""
}
return nil
}
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddConversionFuncs(
Convert_v1_BuildConfig_To_api_BuildConfig,
Convert_api_BuildConfig_To_v1_BuildConfig,
Convert_v1_SourceBuildStrategy_To_api_SourceBuildStrategy,
Convert_api_SourceBuildStrategy_To_v1_SourceBuildStrategy,
Convert_v1_DockerBuildStrategy_To_api_DockerBuildStrategy,
Convert_api_DockerBuildStrategy_To_v1_DockerBuildStrategy,
Convert_v1_CustomBuildStrategy_To_api_CustomBuildStrategy,
Convert_api_CustomBuildStrategy_To_v1_CustomBuildStrategy,
Convert_v1_BuildOutput_To_api_BuildOutput,
Convert_api_BuildOutput_To_v1_BuildOutput,
Convert_v1_BuildTriggerPolicy_To_api_BuildTriggerPolicy,
Convert_api_BuildTriggerPolicy_To_v1_BuildTriggerPolicy,
Convert_v1_SourceRevision_To_api_SourceRevision,
Convert_api_SourceRevision_To_v1_SourceRevision,
Convert_v1_BuildSource_To_api_BuildSource,
Convert_api_BuildSource_To_v1_BuildSource,
Convert_v1_BuildStrategy_To_api_BuildStrategy,
Convert_api_BuildStrategy_To_v1_BuildStrategy,
); err != nil {
return err
}
if err := scheme.AddFieldLabelConversionFunc("v1", "Build",
oapi.GetFieldLabelConversionFunc(newer.BuildToSelectableFields(&newer.Build{}), map[string]string{"name": "metadata.name"}),
); err != nil {
return err
}
if err := scheme.AddFieldLabelConversionFunc("v1", "BuildConfig",
oapi.GetFieldLabelConversionFunc(newer.BuildConfigToSelectableFields(&newer.BuildConfig{}), map[string]string{"name": "metadata.name"}),
); err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,57 @@
package v1
import "k8s.io/kubernetes/pkg/runtime"
func SetDefaults_BuildConfigSpec(config *BuildConfigSpec) {
if len(config.RunPolicy) == 0 {
config.RunPolicy = BuildRunPolicySerial
}
}
func SetDefaults_BuildSource(source *BuildSource) {
if (source != nil) && (source.Type == BuildSourceBinary) && (source.Binary == nil) {
source.Binary = &BinaryBuildSource{}
}
}
func SetDefaults_BuildStrategy(strategy *BuildStrategy) {
if (strategy != nil) && (strategy.Type == DockerBuildStrategyType) && (strategy.DockerStrategy == nil) {
strategy.DockerStrategy = &DockerBuildStrategy{}
}
}
func SetDefaults_SourceBuildStrategy(obj *SourceBuildStrategy) {
if len(obj.From.Kind) == 0 {
obj.From.Kind = "ImageStreamTag"
}
}
func SetDefaults_DockerBuildStrategy(obj *DockerBuildStrategy) {
if obj.From != nil && len(obj.From.Kind) == 0 {
obj.From.Kind = "ImageStreamTag"
}
}
func SetDefaults_CustomBuildStrategy(obj *CustomBuildStrategy) {
if len(obj.From.Kind) == 0 {
obj.From.Kind = "ImageStreamTag"
}
}
func SetDefaults_BuildTriggerPolicy(obj *BuildTriggerPolicy) {
if obj.Type == ImageChangeBuildTriggerType && obj.ImageChange == nil {
obj.ImageChange = &ImageChangeTrigger{}
}
}
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return scheme.AddDefaultingFuncs(
SetDefaults_BuildConfigSpec,
SetDefaults_BuildSource,
SetDefaults_BuildStrategy,
SetDefaults_SourceBuildStrategy,
SetDefaults_DockerBuildStrategy,
SetDefaults_CustomBuildStrategy,
SetDefaults_BuildTriggerPolicy,
)
}

View File

@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/api
// Package v1 is the v1 version of the API.
package v1

File diff suppressed because it is too large

View File

@ -0,0 +1,40 @@
package v1
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
)
const GroupName = ""
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Build{},
&BuildList{},
&BuildConfig{},
&BuildConfigList{},
&BuildLog{},
&BuildRequest{},
&BuildLogOptions{},
&BinaryBuildRequestOptions{},
)
return nil
}
func (obj *Build) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildConfigList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildLog) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BuildLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }
func (obj *BinaryBuildRequestOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta }

View File

@ -0,0 +1,480 @@
package v1
// This file contains methods that can be used by the go-restful package to generate Swagger
// documentation for the object types found in 'types.go' This file is automatically generated
// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift.
// ==== DO NOT EDIT THIS FILE MANUALLY ====
var map_BinaryBuildRequestOptions = map[string]string{
"": "BinaryBuildRequestOptions are the options required to fully speficy a binary build request",
"metadata": "metadata for BinaryBuildRequestOptions.",
"asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive",
"revision.commit": "revision.commit is the value identifying a specific commit",
"revision.message": "revision.message is the description of a specific commit",
"revision.authorName": "revision.authorName of the source control user",
"revision.authorEmail": "revision.authorEmail of the source control user",
"revision.committerName": "revision.committerName of the source control user",
"revision.committerEmail": "revision.committerEmail of the source control user",
}
func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string {
return map_BinaryBuildRequestOptions
}
var map_BinaryBuildSource = map[string]string{
"": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.",
"asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.",
}
func (BinaryBuildSource) SwaggerDoc() map[string]string {
return map_BinaryBuildSource
}
var map_Build = map[string]string{
"": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.",
"metadata": "Standard object's metadata.",
"spec": "spec is all the inputs used to execute the build.",
"status": "status is the current status of the build.",
}
func (Build) SwaggerDoc() map[string]string {
return map_Build
}
var map_BuildConfig = map[string]string{
"": "Build configurations define a build process for new Docker images. There are three types of builds possible - a Docker build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run // arbitrary Docker images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the Docker registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be // created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.",
"metadata": "metadata for BuildConfig.",
"spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.",
"status": "status holds any relevant information about a build config",
}
func (BuildConfig) SwaggerDoc() map[string]string {
return map_BuildConfig
}
var map_BuildConfigList = map[string]string{
"": "BuildConfigList is a collection of BuildConfigs.",
"metadata": "metadata for BuildConfigList.",
"items": "items is a list of build configs",
}
func (BuildConfigList) SwaggerDoc() map[string]string {
return map_BuildConfigList
}
var map_BuildConfigSpec = map[string]string{
"": "BuildConfigSpec describes when and how builds are created",
"triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.",
"runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".",
}
func (BuildConfigSpec) SwaggerDoc() map[string]string {
return map_BuildConfigSpec
}
var map_BuildConfigStatus = map[string]string{
"": "BuildConfigStatus contains current state of the build config object.",
"lastVersion": "lastVersion is used to inform about number of last triggered build.",
}
func (BuildConfigStatus) SwaggerDoc() map[string]string {
return map_BuildConfigStatus
}
var map_BuildList = map[string]string{
"": "BuildList is a collection of Builds.",
"metadata": "metadata for BuildList.",
"items": "items is a list of builds",
}
func (BuildList) SwaggerDoc() map[string]string {
return map_BuildList
}
var map_BuildLog = map[string]string{
"": "BuildLog is the (unused) resource associated with the build log redirector",
}
func (BuildLog) SwaggerDoc() map[string]string {
return map_BuildLog
}
var map_BuildLogOptions = map[string]string{
"": "BuildLogOptions is the REST options for a build log",
"container": "cointainer for which to stream logs. Defaults to only container if there is one container in the pod.",
"follow": "follow if true indicates that the build log should be streamed until the build terminates.",
"previous": "previous returns previous build logs. Defaults to false.",
"sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
"timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
"tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
"limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
"nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.",
"version": "version of the build for which to view logs.",
}
func (BuildLogOptions) SwaggerDoc() map[string]string {
return map_BuildLogOptions
}
var map_BuildOutput = map[string]string{
"": "BuildOutput is input to a build strategy and describes the Docker image that the strategy should produce.",
"to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a Docker image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.",
"pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).",
"imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.",
}
func (BuildOutput) SwaggerDoc() map[string]string {
return map_BuildOutput
}
var map_BuildPostCommitSpec = map[string]string{
"": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command return a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n \"postCommit\": {\n \"script\": \"rake test --verbose\",\n }\n\n The above is a convenient form which is equivalent to:\n\n \"postCommit\": {\n \"command\": [\"/bin/sh\", \"-ic\"],\n \"args\": [\"rake test --verbose\"]\n }\n\n2. A command as the image entrypoint:\n\n \"postCommit\": {\n \"commit\": [\"rake\", \"test\", \"--verbose\"]\n }\n\n Command overrides the image entrypoint in the exec form, as documented in\n Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n \"postCommit\": {\n\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n \"postCommit\": {\n \"script\": \"rake test $1\",\n \"args\": [\"--verbose\"]\n }\n\n This form is useful if you need to pass arguments that would otherwise be\n hard to quote properly in the shell script. In the script, $0 will be\n \"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n \"postCommit\": {\n \"command\": [\"rake\", \"test\"],\n \"args\": [\"--verbose\"]\n }\n\n This form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. If none of the fields are specified, the hook is not executed.",
"command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.",
"args": "args is a list of arguments that are provided to either Command, Script or the Docker image's default entrypoint. The arguments are placed immediately after the command to be run.",
"script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.",
}
func (BuildPostCommitSpec) SwaggerDoc() map[string]string {
return map_BuildPostCommitSpec
}
var map_BuildRequest = map[string]string{
"": "BuildRequest is the resource used to pass parameters to build generator",
"metadata": "metadata for BuildRequest.",
"revision": "revision is the information from the source for a specific repo snapshot.",
"triggeredByImage": "triggeredByImage is the Image that triggered this build.",
"from": "from is the reference to the ImageStreamTag that triggered the build.",
"binary": "binary indicates a request to build from a binary provided to the builder",
"lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.",
"env": "env contains additional environment variables you want to pass into a builder container",
"triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.",
}
func (BuildRequest) SwaggerDoc() map[string]string {
return map_BuildRequest
}
var map_BuildSource = map[string]string{
"": "BuildSource is the SCM used for the build.",
"type": "type of build input to accept",
"binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and Docker builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. Custom builds will receive this binary as input on STDIN.",
"dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.",
"git": "git contains optional information about git build source",
"images": "images describes a set of images to be used to provide source for the build",
"contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows to have buildable sources in directory other than root of repository.",
"sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning private repository. The secret contains valid credentials for remote repository, where the data's key represent the authentication method to be used and value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.",
"secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.",
}
func (BuildSource) SwaggerDoc() map[string]string {
return map_BuildSource
}
var map_BuildSpec = map[string]string{
"": "BuildSpec has the information to represent a build and also additional information about a build",
"triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.",
}
func (BuildSpec) SwaggerDoc() map[string]string {
return map_BuildSpec
}
var map_BuildStatus = map[string]string{
"": "BuildStatus contains the status of a build",
"phase": "phase is the point in the build lifecycle.",
"cancelled": "cancelled describes if a cancel event was triggered for the build.",
"reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
"message": "message is a human-readable message indicating details about why the build has this status.",
"startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.",
"completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.",
"duration": "duration contains time.Duration object describing build time.",
"outputDockerImageReference": "outputDockerImageReference contains a reference to the Docker image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.",
"config": "config is an ObjectReference to the BuildConfig this Build is based on.",
}
func (BuildStatus) SwaggerDoc() map[string]string {
return map_BuildStatus
}
var map_BuildStrategy = map[string]string{
"": "BuildStrategy contains the details of how to perform a build.",
"type": "type is the kind of build strategy.",
"dockerStrategy": "dockerStrategy holds the parameters to the Docker build strategy.",
"sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.",
"customStrategy": "customStrategy holds the parameters to the Custom build strategy",
"jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. This strategy is in tech preview.",
}
func (BuildStrategy) SwaggerDoc() map[string]string {
return map_BuildStrategy
}
var map_BuildTriggerCause = map[string]string{
"": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.",
"message": "message is used to store a human readable message for why the build was triggered. E.g.: \"Manually triggered by user\", \"Configuration change\",etc.",
"genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.",
"githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.",
"imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.",
}
func (BuildTriggerCause) SwaggerDoc() map[string]string {
return map_BuildTriggerCause
}
var map_BuildTriggerPolicy = map[string]string{
"": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.",
"type": "type is the type of build trigger",
"github": "github contains the parameters for a GitHub webhook type of trigger",
"generic": "generic contains the parameters for a Generic webhook type of trigger",
"imageChange": "imageChange contains parameters for an ImageChange type of trigger",
}
func (BuildTriggerPolicy) SwaggerDoc() map[string]string {
return map_BuildTriggerPolicy
}
var map_CommonSpec = map[string]string{
"": "CommonSpec encapsulates all the inputs necessary to represent a build.",
"serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount",
"source": "source describes the SCM in use.",
"revision": "revision is the information from the source for a specific repo snapshot. This is optional.",
"strategy": "strategy defines how to perform a build.",
"output": "output describes the Docker image the Strategy should produce.",
"resources": "resources computes resource requirements to execute the build.",
"postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.",
"completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be positive integer",
"nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.",
}
func (CommonSpec) SwaggerDoc() map[string]string {
return map_CommonSpec
}
var map_CustomBuildStrategy = map[string]string{
"": "CustomBuildStrategy defines input parameters specific to Custom build.",
"from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled",
"pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries",
"env": "env contains additional environment variables you want to pass into a builder container",
"exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build Docker images) from inside the Docker container.",
"forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally",
"secrets": "secrets is a list of additional secrets that will be included in the build pod",
"buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder",
}
func (CustomBuildStrategy) SwaggerDoc() map[string]string {
return map_CustomBuildStrategy
}
var map_DockerBuildStrategy = map[string]string{
"": "DockerBuildStrategy defines input parameters specific to Docker build.",
"from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled the resulting image will be used in the FROM line of the Dockerfile for this build.",
"pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries",
"noCache": "noCache if set to true indicates that the docker build must be executed with the --no-cache=true flag",
"env": "env contains additional environment variables you want to pass into a builder container",
"forcePull": "forcePull describes if the builder should pull the images from registry prior to building.",
"dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the Docker image, relative to the root of the context (contextDir).",
}
func (DockerBuildStrategy) SwaggerDoc() map[string]string {
return map_DockerBuildStrategy
}
var map_GenericWebHookCause = map[string]string{
"": "GenericWebHookCause holds information about a generic WebHook that triggered a build.",
"revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.",
"secret": "secret is the obfuscated webhook secret that triggered a build.",
}
func (GenericWebHookCause) SwaggerDoc() map[string]string {
return map_GenericWebHookCause
}
var map_GenericWebHookEvent = map[string]string{
"": "GenericWebHookEvent is the payload expected for a generic webhook post",
"type": "type is the type of source repository",
"git": "git is the git information if the Type is BuildSourceGit",
"env": "env contains additional environment variables you want to pass into a builder container",
}
func (GenericWebHookEvent) SwaggerDoc() map[string]string {
return map_GenericWebHookEvent
}
var map_GitBuildSource = map[string]string{
"": "GitBuildSource defines the parameters of a Git SCM",
"uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run",
"ref": "ref is the branch/tag/ref to build.",
}
func (GitBuildSource) SwaggerDoc() map[string]string {
return map_GitBuildSource
}
var map_GitHubWebHookCause = map[string]string{
"": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.",
"revision": "revision is the git revision information of the trigger.",
"secret": "secret is the obfuscated webhook secret that triggered a build.",
}
func (GitHubWebHookCause) SwaggerDoc() map[string]string {
return map_GitHubWebHookCause
}
var map_GitInfo = map[string]string{
"": "GitInfo is the aggregated git information for a generic webhook post",
}
func (GitInfo) SwaggerDoc() map[string]string {
return map_GitInfo
}
var map_GitSourceRevision = map[string]string{
"": "GitSourceRevision is the commit information from a git source for a build",
"commit": "commit is the commit hash identifying a specific commit",
"author": "author is the author of a specific commit",
"committer": "committer is the committer of a specific commit",
"message": "message is the description of a specific commit",
}
func (GitSourceRevision) SwaggerDoc() map[string]string {
return map_GitSourceRevision
}
var map_ImageChangeCause = map[string]string{
"": "ImageChangeCause contains information about the image that triggered a build",
"imageID": "imageID is the ID of the image that triggered a a new build.",
"fromRef": "fromRef contains detailed information about an image that triggered a build.",
}
func (ImageChangeCause) SwaggerDoc() map[string]string {
return map_ImageChangeCause
}
var map_ImageChangeTrigger = map[string]string{
"": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes",
"lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build",
"from": "from is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.",
}
func (ImageChangeTrigger) SwaggerDoc() map[string]string {
return map_ImageChangeTrigger
}
var map_ImageLabel = map[string]string{
"": "ImageLabel represents a label applied to the resulting image.",
"name": "name defines the name of the label. It must have non-zero length.",
"value": "value defines the literal value of the label.",
}
func (ImageLabel) SwaggerDoc() map[string]string {
return map_ImageLabel
}
var map_ImageSource = map[string]string{
"": "ImageSource is used to describe build source that will be extracted from an image. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. A list of paths to copy from the image and their respective destination within the build directory must be specified in the paths array.",
"from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.",
"paths": "paths is a list of source and destination paths to copy from the image.",
"pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.",
}
func (ImageSource) SwaggerDoc() map[string]string {
return map_ImageSource
}
var map_ImageSourcePath = map[string]string{
"": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.",
"sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory.",
"destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.",
}
func (ImageSourcePath) SwaggerDoc() map[string]string {
return map_ImageSourcePath
}
var map_JenkinsPipelineBuildStrategy = map[string]string{
"": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. This strategy is in tech preview.",
"jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.",
"jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.",
}
func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string {
return map_JenkinsPipelineBuildStrategy
}
var map_ProxyConfig = map[string]string{
"": "ProxyConfig defines what proxies to use for an operation",
"httpProxy": "httpProxy is a proxy used to reach the git repository over http",
"httpsProxy": "httpsProxy is a proxy used to reach the git repository over https",
"noProxy": "noProxy is the list of domains for which the proxy should not be used",
}
func (ProxyConfig) SwaggerDoc() map[string]string {
return map_ProxyConfig
}
var map_SecretBuildSource = map[string]string{
"": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.",
"secret": "secret is a reference to an existing secret that you want to use in your build.",
"destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length. For the Docker build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during docker build.",
}
func (SecretBuildSource) SwaggerDoc() map[string]string {
return map_SecretBuildSource
}
var map_SecretSpec = map[string]string{
"": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point",
"secretSource": "secretSource is a reference to the secret",
"mountPath": "mountPath is the path at which to mount the secret",
}
func (SecretSpec) SwaggerDoc() map[string]string {
return map_SecretSpec
}
var map_SourceBuildStrategy = map[string]string{
"": "SourceBuildStrategy defines input parameters specific to an Source build.",
"from": "from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage from which the docker image should be pulled",
"pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the Docker images from the private Docker registries",
"env": "env contains additional environment variables you want to pass into a builder container",
"scripts": "scripts is the location of Source scripts",
"incremental": "incremental flag forces the Source build to do incremental builds if true.",
"forcePull": "forcePull describes if the builder should pull the images from registry prior to building.",
"runtimeImage": "runtimeImage is an optional image that is used to run an application without unneeded dependencies installed. The building of the application is still done in the builder image but, post build, you can copy the needed artifacts in the runtime image for use. This field and the feature it enables are in tech preview.",
"runtimeArtifacts": "runtimeArtifacts specifies a list of source/destination pairs that will be copied from the builder to the runtime image. sourcePath can be a file or directory. destinationDir must be a directory. destinationDir can also be empty or equal to \".\", in this case it just refers to the root of WORKDIR. This field and the feature it enables are in tech preview.",
}
func (SourceBuildStrategy) SwaggerDoc() map[string]string {
return map_SourceBuildStrategy
}
var map_SourceControlUser = map[string]string{
"": "SourceControlUser defines the identity of a user of source control",
"name": "name of the source control user",
"email": "email of the source control user",
}
func (SourceControlUser) SwaggerDoc() map[string]string {
return map_SourceControlUser
}
var map_SourceRevision = map[string]string{
"": "SourceRevision is the revision or commit information from the source for the build",
"type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'",
"git": "Git contains information about git-based build source",
}
func (SourceRevision) SwaggerDoc() map[string]string {
return map_SourceRevision
}
var map_WebHookTrigger = map[string]string{
"": "WebHookTrigger is a trigger that gets invoked using a webhook type of post",
"secret": "secret used to validate requests.",
"allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.",
}
func (WebHookTrigger) SwaggerDoc() map[string]string {
return map_WebHookTrigger
}
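
Each generated map above is exposed through a `SwaggerDoc()` method on its corresponding type, so field documentation can be looked up programmatically. A minimal sketch, assuming the vendored import path shown below (the path and program are illustrative only):

```go
package main

import (
	"fmt"

	// Assumed vendored import path; adjust to wherever this package actually lives.
	buildv1 "github.com/openshift/origin/pkg/build/api/v1"
)

func main() {
	// SwaggerDoc returns a map keyed by JSON field name; "" holds the type-level description.
	docs := buildv1.BuildSource{}.SwaggerDoc()
	fmt.Println(docs[""])    // "BuildSource is the SCM used for the build."
	fmt.Println(docs["git"]) // "git contains optional information about git build source"
}
```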

View File

@ -0,0 +1,928 @@
package v1
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/unversioned"
kapi "k8s.io/kubernetes/pkg/api/v1"
)
// +genclient=true
// Build encapsulates the inputs needed to produce a new deployable image, as well as
// the status of the execution and a reference to the Pod which executed the build.
type Build struct {
unversioned.TypeMeta `json:",inline"`
// Standard object's metadata.
kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec is all the inputs used to execute the build.
Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// status is the current status of the build.
Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// BuildSpec has the information to represent a build and also additional
// information about a build
type BuildSpec struct {
// CommonSpec is the information that represents a build
CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"`
// triggeredBy describes which triggers started the most recent update to the
// build configuration and contains information about those triggers.
TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,2,rep,name=triggeredBy"`
}
// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
// +protobuf.nullable=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
type OptionalNodeSelector map[string]string
func (t OptionalNodeSelector) String() string {
return fmt.Sprintf("%v", map[string]string(t))
}
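The comment above hinges on the nil-versus-empty distinction; the sketch below (a hypothetical helper, written as if it lived in this package) shows the three cases side by side:

```go
// Hypothetical helper illustrating how a nil OptionalNodeSelector differs
// from a non-nil empty one; only the nil form lets cluster defaults apply.
func nodeSelectorExample() {
	var unset OptionalNodeSelector                       // nil: default build node selectors may be applied
	empty := OptionalNodeSelector{}                      // non-nil but empty: defaults are ignored
	pinned := OptionalNodeSelector{"node-role": "build"} // explicit placement constraint

	fmt.Println(unset == nil, empty == nil, pinned.String()) // true false map[node-role:build]
}
```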
// CommonSpec encapsulates all the inputs necessary to represent a build.
type CommonSpec struct {
// serviceAccount is the name of the ServiceAccount to use to run the pod
// created by this build.
// The pod will be allowed to use secrets referenced by the ServiceAccount
ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"`
// source describes the SCM in use.
Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"`
// revision is the information from the source for a specific repo snapshot.
// This is optional.
Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"`
// strategy defines how to perform a build.
Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"`
// output describes the Docker image the Strategy should produce.
Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"`
// resources computes resource requirements to execute the build.
Resources kapi.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"`
// postCommit is a build hook executed after the build output image is
// committed, before it is pushed to a registry.
PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"`
// completionDeadlineSeconds is an optional duration in seconds, counted from
// the time when a build pod gets scheduled in the system, that the build may
// be active on a node before the system actively tries to terminate the
// build; value must be a positive integer
CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"`
// nodeSelector is a selector which must be true for the build pod to fit on a node
// If nil, it can be overridden by default build nodeselector values for the cluster.
// If set to an empty map or a map with any values, default build nodeselector values
// are ignored.
NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"`
}
// BuildTriggerCause holds information about a triggered build. It is used for
// displaying build trigger data for each build and build configuration in oc
// describe. It is also used to describe which triggers led to the most recent
// update in the build configuration.
type BuildTriggerCause struct {
// message is used to store a human readable message for why the build was
// triggered. E.g.: "Manually triggered by user", "Configuration change",etc.
Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
// genericWebHook holds data about a builds generic webhook trigger.
GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
// gitHubWebHook represents data for a GitHub webhook that fired a
//specific build.
GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
// imageChangeBuild stores information about an imagechange event
// that triggered a new build.
ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
}
// GenericWebHookCause holds information about a generic WebHook that
// triggered a build.
type GenericWebHookCause struct {
// revision is an optional field that stores the git source revision
// information of the generic webhook trigger when it is available.
Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
// secret is the obfuscated webhook secret that triggered a build.
Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
}
// GitHubWebHookCause has information about a GitHub webhook that triggered a
// build.
type GitHubWebHookCause struct {
// revision is the git revision information of the trigger.
Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"`
// secret is the obfuscated webhook secret that triggered a build.
Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
}
// ImageChangeCause contains information about the image that triggered a
// build
type ImageChangeCause struct {
// imageID is the ID of the image that triggered a new build.
ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"`
// fromRef contains detailed information about an image that triggered a
// build.
FromRef *kapi.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"`
}
// BuildStatus contains the status of a build
type BuildStatus struct {
// phase is the point in the build lifecycle.
Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"`
// cancelled describes if a cancel event was triggered for the build.
Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"`
// reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"`
// message is a human-readable message indicating details about why the build has this status.
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// startTimestamp is a timestamp representing the server time when this Build started
// running in a Pod.
// It is represented in RFC3339 form and is in UTC.
StartTimestamp *unversioned.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"`
// completionTimestamp is a timestamp representing the server time when this Build was
// finished, whether that build failed or succeeded. It reflects the time at which
// the Pod running the Build terminated.
// It is represented in RFC3339 form and is in UTC.
CompletionTimestamp *unversioned.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"`
// duration contains time.Duration object describing build time.
Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"`
// outputDockerImageReference contains a reference to the Docker image that
// will be built by this build. Its value is computed from
// Build.Spec.Output.To, and should include the registry address, so that
// it can be used to push and pull the image.
OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"`
// config is an ObjectReference to the BuildConfig this Build is based on.
Config *kapi.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"`
}
// BuildPhase represents the status of a build at a point in time.
type BuildPhase string
// Valid values for BuildPhase.
const (
// BuildPhaseNew is automatically assigned to a newly created build.
BuildPhaseNew BuildPhase = "New"
// BuildPhasePending indicates that a pod name has been assigned and a build is
// about to start running.
BuildPhasePending BuildPhase = "Pending"
// BuildPhaseRunning indicates that a pod has been created and a build is running.
BuildPhaseRunning BuildPhase = "Running"
// BuildPhaseComplete indicates that a build has been successful.
BuildPhaseComplete BuildPhase = "Complete"
// BuildPhaseFailed indicates that a build has executed and failed.
BuildPhaseFailed BuildPhase = "Failed"
// BuildPhaseError indicates that an error prevented the build from executing.
BuildPhaseError BuildPhase = "Error"
// BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
BuildPhaseCancelled BuildPhase = "Cancelled"
)
// StatusReason is a brief CamelCase string that describes a temporary or
// permanent build error condition, meant for machine parsing and tidy display
// in the CLI.
type StatusReason string
// BuildSourceType is the type of SCM used.
type BuildSourceType string
// Valid values for BuildSourceType.
const (
//BuildSourceGit instructs a build to use a Git source control repository as the build input.
BuildSourceGit BuildSourceType = "Git"
// BuildSourceDockerfile uses a Dockerfile as the start of a build
BuildSourceDockerfile BuildSourceType = "Dockerfile"
// BuildSourceBinary indicates the build will accept a Binary file as input.
BuildSourceBinary BuildSourceType = "Binary"
// BuildSourceImage indicates the build will accept an image as input
BuildSourceImage BuildSourceType = "Image"
// BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies)
BuildSourceNone BuildSourceType = "None"
)
// BuildSource is the SCM used for the build.
type BuildSource struct {
// type of build input to accept
// +k8s:conversion-gen=false
Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
// binary builds accept a binary as their input. The binary is generally assumed to be a tar,
// gzipped tar, or zip file depending on the strategy. For Docker builds, this is the build
// context and an optional Dockerfile may be specified to override any Dockerfile in the
// build context. For Source builds, this is assumed to be an archive as described above. For
// Source and Docker builds, if binary.asFile is set the build will receive a directory with
// a single file. contextDir may be used when an archive is provided. Custom builds will
// receive this binary as input on STDIN.
Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"`
// dockerfile is the raw contents of a Dockerfile which should be built. When this option is
// specified, the FROM may be modified based on your strategy base image and additional ENV
// stanzas from your strategy environment will be added after the FROM, but before the rest
// of your Dockerfile stanzas. The Dockerfile source type may be used with other options like
// git - in those cases the Git repo will have any innate Dockerfile replaced in the context
// dir.
Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"`
// git contains optional information about git build source
Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"`
// images describes a set of images to be used to provide source for the build
Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"`
// contextDir specifies the sub-directory where the source code for the application exists.
// This allows buildable sources to live in a directory other than the root of
// the repository.
ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"`
// sourceSecret is the name of a Secret that would be used for setting
// up the authentication for cloning a private repository.
// The secret contains valid credentials for the remote repository, where the
// data's key represents the authentication method to be used and the value is
// the base64 encoded credentials. Supported auth methods are: ssh-privatekey.
SourceSecret *kapi.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"`
// secrets represents a list of secrets and their destinations that will
// be used only for the build.
Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"`
}
// ImageSource is used to describe build source that will be extracted from an image. A reference of
// type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified
// to pull the image from an external registry or override the default service account secret if pulling
// from the internal registry. A list of paths to copy from the image and their respective destination
// within the build directory must be specified in the paths array.
type ImageSource struct {
// from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to
// copy source from.
From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
// paths is a list of source and destination paths to copy from the image.
Paths []ImageSourcePath `json:"paths" protobuf:"bytes,2,rep,name=paths"`
// pullSecret is a reference to a secret to be used to pull the image from a registry
// If the image is pulled from the OpenShift registry, this field does not need to be set.
PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"`
}
// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.
type ImageSourcePath struct {
// sourcePath is the absolute path of the file or directory inside the image to
// copy to the build directory.
SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"`
// destinationDir is the relative directory within the build directory
// where files copied from the image are placed.
DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"`
}
// SecretBuildSource describes a secret and its destination directory that will be
// used only at the build time. The content of the secret referenced here will
// be copied into the destination directory instead of mounting.
type SecretBuildSource struct {
// secret is a reference to an existing secret that you want to use in your
// build.
Secret kapi.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"`
// destinationDir is the directory where the files from the secret should be
// available for the build time.
// For the Source build strategy, these will be injected into a container
// where the assemble script runs. Later, when the script finishes, all files
// injected will be truncated to zero length.
// For the Docker build strategy, these will be copied into the build
// directory, where the Dockerfile is located, so users can ADD or COPY them
// during docker build.
DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"`
}
// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies,
// where the file will be extracted and used as the build source.
type BinaryBuildSource struct {
// asFile indicates that the provided binary input should be considered a single file
// within the build input. For example, specifying "webapp.war" would place the provided
// binary as `/webapp.war` for the builder. If left empty, the Docker and Source build
// strategies assume this file is a zip, tar, or tar.gz file and extract it as the source.
// The custom strategy receives this binary as standard input. This filename may not
// contain slashes or be '..' or '.'.
AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"`
}
// SourceRevision is the revision or commit information from the source for the build
type SourceRevision struct {
// type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'
// +k8s:conversion-gen=false
Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
// Git contains information about git-based build source
Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
}
// GitSourceRevision is the commit information from a git source for a build
type GitSourceRevision struct {
// commit is the commit hash identifying a specific commit
Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"`
// author is the author of a specific commit
Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"`
// committer is the committer of a specific commit
Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"`
// message is the description of a specific commit
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// ProxyConfig defines what proxies to use for an operation
type ProxyConfig struct {
// httpProxy is a proxy used to reach the git repository over http
HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"`
// httpsProxy is a proxy used to reach the git repository over https
HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"`
// noProxy is the list of domains for which the proxy should not be used
NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"`
}
// GitBuildSource defines the parameters of a Git SCM
type GitBuildSource struct {
// uri points to the source that will be built. The structure of the source
// will depend on the type of build to run
URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"`
// ref is the branch/tag/ref to build.
Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"`
// proxyConfig defines the proxies to use for the git clone operation
ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"`
}
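Taken together, BuildSource and GitBuildSource are what a generator fills in when a build should clone from git. A minimal sketch (hypothetical helper written as if inside this package; the repository URL, branch, and context directory are supplied by the caller):

```go
// Hypothetical helper: assemble a git-backed BuildSource from a repository URL,
// branch, and optional context directory. The example values are illustrative.
func gitBuildSource(repo, branch, contextDir string) BuildSource {
	return BuildSource{
		Type: BuildSourceGit,
		Git: &GitBuildSource{
			URI: repo,   // e.g. "https://github.com/example/app.git"
			Ref: branch, // e.g. "master"
		},
		ContextDir: contextDir, // sub-directory holding the buildable sources, if any
	}
}
```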
// SourceControlUser defines the identity of a user of source control
type SourceControlUser struct {
// name of the source control user
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// email of the source control user
Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"`
}
// BuildStrategy contains the details of how to perform a build.
type BuildStrategy struct {
// type is the kind of build strategy.
// +k8s:conversion-gen=false
Type BuildStrategyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"`
// dockerStrategy holds the parameters to the Docker build strategy.
DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"`
// sourceStrategy holds the parameters to the Source build strategy.
SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
// customStrategy holds the parameters to the Custom build strategy
CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
// JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
// This strategy is in tech preview.
JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
}
// BuildStrategyType describes a particular way of performing a build.
type BuildStrategyType string
// Valid values for BuildStrategyType.
const (
// DockerBuildStrategyType performs builds using a Dockerfile.
DockerBuildStrategyType BuildStrategyType = "Docker"
// SourceBuildStrategyType performs builds using Source-To-Image with a Git repository
// and a builder image.
SourceBuildStrategyType BuildStrategyType = "Source"
// CustomBuildStrategyType performs builds using custom builder Docker image.
CustomBuildStrategyType BuildStrategyType = "Custom"
// JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
)
// CustomBuildStrategy defines input parameters specific to Custom build.
type CustomBuildStrategy struct {
// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
// the docker image should be pulled
From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
// pullSecret is the name of a Secret that would be used for setting up
// the authentication for pulling the Docker images from the private Docker
// registries
PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
// env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
// exposeDockerSocket will allow running Docker commands (and build Docker images) from
// inside the Docker container.
// TODO: Allow admins to enforce 'false' for this option
ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
// forcePull describes if the controller should configure the build pod to always pull the images
// for the builder or only pull if it is not present locally
ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
// secrets is a list of additional secrets that will be included in the build pod
Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
// buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
}
// DockerBuildStrategy defines input parameters specific to Docker build.
type DockerBuildStrategy struct {
// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
// the docker image should be pulled.
// The resulting image will be used in the FROM line of the Dockerfile for this build.
From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
// pullSecret is the name of a Secret that would be used for setting up
// the authentication for pulling the Docker images from the private Docker
// registries
PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
// noCache if set to true indicates that the docker build must be executed with the
// --no-cache=true flag
NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
// env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
// forcePull describes if the builder should pull the images from registry prior to building.
ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
// dockerfilePath is the path of the Dockerfile that will be used to build the Docker image,
// relative to the root of the context (contextDir).
DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
}
// SourceBuildStrategy defines input parameters specific to a Source build.
type SourceBuildStrategy struct {
// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
// the docker image should be pulled
From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
// pullSecret is the name of a Secret that would be used for setting up
// the authentication for pulling the Docker images from the private Docker
// registries
PullSecret *kapi.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
// env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
// scripts is the location of Source scripts
Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
// incremental flag forces the Source build to do incremental builds if true.
Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
// forcePull describes if the builder should pull the images from registry prior to building.
ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
// runtimeImage is an optional image that is used to run an application
// without unneeded dependencies installed. The building of the application
// is still done in the builder image but, post build, you can copy the
// needed artifacts in the runtime image for use.
// This field and the feature it enables are in tech preview.
RuntimeImage *kapi.ObjectReference `json:"runtimeImage,omitempty" protobuf:"bytes,7,opt,name=runtimeImage"`
// runtimeArtifacts specifies a list of source/destination pairs that will be
// copied from the builder to the runtime image. sourcePath can be a file or
// directory. destinationDir must be a directory. destinationDir can also be
// empty or equal to ".", in this case it just refers to the root of WORKDIR.
// This field and the feature it enables are in tech preview.
RuntimeArtifacts []ImageSourcePath `json:"runtimeArtifacts,omitempty" protobuf:"bytes,8,rep,name=runtimeArtifacts"`
}
// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
// This strategy is in tech preview.
type JenkinsPipelineBuildStrategy struct {
// JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
// relative to the root of the context (contextDir). If neither JenkinsfilePath nor
// Jenkinsfile is specified, this defaults to Jenkinsfile in the root of the specified contextDir.
JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
// Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
}
// A BuildPostCommitSpec holds a build post commit hook specification. The hook
// executes a command in a temporary container running the build output image,
// immediately after the last layer of the image is committed and before the
// image is pushed to a registry. The command is executed with the current
// working directory ($PWD) set to the image's WORKDIR.
//
// The build will be marked as failed if the hook execution fails. It will fail
// if the script or command return a non-zero exit code, or if there is any
// other error related to starting the temporary container.
//
// There are five different ways to configure the hook. As an example, all forms
// below are equivalent and will execute `rake test --verbose`.
//
// 1. Shell script:
//
// "postCommit": {
// "script": "rake test --verbose",
// }
//
// The above is a convenient form which is equivalent to:
//
// "postCommit": {
// "command": ["/bin/sh", "-ic"],
// "args": ["rake test --verbose"]
// }
//
// 2. A command as the image entrypoint:
//
// "postCommit": {
// "commit": ["rake", "test", "--verbose"]
// }
//
// Command overrides the image entrypoint in the exec form, as documented in
// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
//
// 3. Pass arguments to the default entrypoint:
//
// "postCommit": {
// "args": ["rake", "test", "--verbose"]
// }
//
// This form is only useful if the image entrypoint can handle arguments.
//
// 4. Shell script with arguments:
//
// "postCommit": {
// "script": "rake test $1",
// "args": ["--verbose"]
// }
//
// This form is useful if you need to pass arguments that would otherwise be
// hard to quote properly in the shell script. In the script, $0 will be
// "/bin/sh" and $1, $2, etc, are the positional arguments from Args.
//
// 5. Command with arguments:
//
// "postCommit": {
// "command": ["rake", "test"],
// "args": ["--verbose"]
// }
//
// This form is equivalent to appending the arguments to the Command slice.
//
// It is invalid to provide both Script and Command simultaneously. If none of
// the fields are specified, the hook is not executed.
type BuildPostCommitSpec struct {
// command is the command to run. It may not be specified with Script.
// This might be needed if the image doesn't have `/bin/sh`, or if you
// do not want to use a shell. In all other cases, using Script might be
// more convenient.
Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
// args is a list of arguments that are provided to either Command,
// Script or the Docker image's default entrypoint. The arguments are
// placed immediately after the command to be run.
Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
// script is a shell script to be run with `/bin/sh -ic`. It may not be
// specified with Command. Use Script when a shell script is appropriate
// to execute the post build hook, for example for running unit tests
// with `rake test`. If you need control over the image entrypoint, or
// if the image does not have `/bin/sh`, use Command and/or Args.
// The `-i` flag is needed to support CentOS and RHEL images that use
// Software Collections (SCL), in order to have the appropriate
// collections enabled in the shell. E.g., in the Ruby image, this is
// necessary to make `ruby`, `bundle` and other binaries available in
// the PATH.
Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
}
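For reference, two of the equivalent forms described in the comment above look like this when expressed as Go literals (sketch only, written as if inside this package):

```go
// Sketch: the "script" form and the "command"+"args" form of the post-commit
// hook documented above; both run `rake test --verbose` after the image build.
func postCommitExamples() (BuildPostCommitSpec, BuildPostCommitSpec) {
	scriptForm := BuildPostCommitSpec{
		Script: "rake test --verbose", // executed via /bin/sh -ic
	}
	commandForm := BuildPostCommitSpec{
		Command: []string{"rake", "test"},
		Args:    []string{"--verbose"}, // args are appended after the command
	}
	return scriptForm, commandForm
}
```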
// BuildOutput is input to a build strategy and describes the Docker image that the strategy
// should produce.
type BuildOutput struct {
// to defines an optional location to push the output of this build to.
// Kind must be one of 'ImageStreamTag' or 'DockerImage'.
// This value will be used to look up a Docker image repository to push to.
// In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
// the build unless Namespace is specified.
To *kapi.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
// PushSecret is the name of a Secret that would be used for setting
// up the authentication for executing the Docker push to authentication
// enabled Docker Registry (or Docker Hub).
PushSecret *kapi.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
// imageLabels define a list of labels that are applied to the resulting image. If there
// are multiple labels with the same name then the last one in the list is used.
ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
}
// ImageLabel represents a label applied to the resulting image.
type ImageLabel struct {
// name defines the name of the label. It must have non-zero length.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// value defines the literal value of the label.
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// Build configurations define a build process for new Docker images. There are three types of builds possible - a Docker build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary Docker images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the Docker registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.
//
// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
type BuildConfig struct {
unversioned.TypeMeta `json:",inline"`
// metadata for BuildConfig.
kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec holds all the input necessary to produce a new build, and the conditions when
// to trigger them.
Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// status holds any relevant information about a build config
Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
}
// BuildConfigSpec describes when and how builds are created
type BuildConfigSpec struct {
//triggers determine how new Builds can be launched from a BuildConfig. If
//no triggers are defined, a new build can only occur as a result of an
//explicit client build creation.
Triggers []BuildTriggerPolicy `json:"triggers" protobuf:"bytes,1,rep,name=triggers"`
// RunPolicy describes how the new build created from this build
// configuration will be scheduled for execution.
// This is optional, if not specified we default to "Serial".
RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
// CommonSpec is the desired build specification
CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
}
// BuildRunPolicy defines the behaviour of how the new builds are executed
// from the existing build configuration.
type BuildRunPolicy string
const (
// BuildRunPolicyParallel schedules new builds immediately after they are
// created. Builds will be executed in parallel.
BuildRunPolicyParallel BuildRunPolicy = "Parallel"
// BuildRunPolicySerial schedules new builds to execute in a sequence as
// they are created. Every build gets queued up and will execute when the
// previous build completes. This is the default policy.
BuildRunPolicySerial BuildRunPolicy = "Serial"
// BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
// cancelling all the previously queued build.
BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
)
// BuildConfigStatus contains current state of the build config object.
type BuildConfigStatus struct {
// lastVersion is used to inform about number of last triggered build.
LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
}
// WebHookTrigger is a trigger that gets invoked using a webhook type of post
type WebHookTrigger struct {
// secret used to validate requests.
Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// allowEnv determines whether the webhook can set environment variables; can only
// be set to true for GenericWebHook.
AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
}
// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
type ImageChangeTrigger struct {
// lastTriggeredImageID is used internally by the ImageChangeController to save last
// used image ID for build
LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
// from is a reference to an ImageStreamTag that will trigger a build when updated.
// It is optional. If no From is specified, the From image from the build strategy
// will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
// a build configuration.
From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
}
// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
type BuildTriggerPolicy struct {
// type is the type of build trigger
Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
// github contains the parameters for a GitHub webhook type of trigger
GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
// generic contains the parameters for a Generic webhook type of trigger
GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
// imageChange contains parameters for an ImageChange type of trigger
ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
}
// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
type BuildTriggerType string
const (
// GitHubWebHookBuildTriggerType represents a trigger that launches builds on
// GitHub webhook invocations
GitHubWebHookBuildTriggerType BuildTriggerType = "GitHub"
GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
// GenericWebHookBuildTriggerType represents a trigger that launches builds on
// generic webhook invocations
GenericWebHookBuildTriggerType BuildTriggerType = "Generic"
GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
// ImageChangeBuildTriggerType represents a trigger that launches builds on
// availability of a new version of an image
ImageChangeBuildTriggerType BuildTriggerType = "ImageChange"
ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
// ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
// WARNING: In the future the behavior will change to trigger a build on any config change
ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
)
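Putting the pieces together, the sketch below shows how a minimal BuildConfig could be assembled from the types defined in this file: a git source, a Docker strategy, an ImageStreamTag output, and config-change plus image-change triggers. The helper and its values are illustrative, not code from this change:

```go
// Hypothetical helper assembling a minimal BuildConfig for a service built
// from a git repository. Written as if inside this package.
func exampleBuildConfig(name, repo, branch string) BuildConfig {
	return BuildConfig{
		ObjectMeta: kapi.ObjectMeta{Name: name},
		Spec: BuildConfigSpec{
			Triggers: []BuildTriggerPolicy{
				{Type: ConfigChangeBuildTriggerType},
				{Type: ImageChangeBuildTriggerType, ImageChange: &ImageChangeTrigger{}},
			},
			CommonSpec: CommonSpec{
				Source: BuildSource{
					Type: BuildSourceGit,
					Git:  &GitBuildSource{URI: repo, Ref: branch},
				},
				Strategy: BuildStrategy{
					Type:           DockerBuildStrategyType,
					DockerStrategy: &DockerBuildStrategy{},
				},
				Output: BuildOutput{
					// Push the result to an image stream tag named after the service.
					To: &kapi.ObjectReference{Kind: "ImageStreamTag", Name: name + ":latest"},
				},
			},
		},
	}
}
```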
// BuildList is a collection of Builds.
type BuildList struct {
unversioned.TypeMeta `json:",inline"`
// metadata for BuildList.
unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is a list of builds
Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// BuildConfigList is a collection of BuildConfigs.
type BuildConfigList struct {
unversioned.TypeMeta `json:",inline"`
// metadata for BuildConfigList.
unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is a list of build configs
Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// GenericWebHookEvent is the payload expected for a generic webhook post
type GenericWebHookEvent struct {
// type is the type of source repository
// +k8s:conversion-gen=false
Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
// git is the git information if the Type is BuildSourceGit
Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
// env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
}
// GitInfo is the aggregated git information for a generic webhook post
type GitInfo struct {
GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
}
// BuildLog is the (unused) resource associated with the build log redirector
type BuildLog struct {
unversioned.TypeMeta `json:",inline"`
}
// BuildRequest is the resource used to pass parameters to build generator
type BuildRequest struct {
unversioned.TypeMeta `json:",inline"`
// metadata for BuildRequest.
kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// revision is the information from the source for a specific repo snapshot.
Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// triggeredByImage is the Image that triggered this build.
TriggeredByImage *kapi.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
// from is the reference to the ImageStreamTag that triggered the build.
From *kapi.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
// binary indicates a request to build from a binary provided to the builder
Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
// lastVersion (optional) is the LastVersion of the BuildConfig that was used
// to generate the build. If the BuildConfig in the generator doesn't match, a build will
// not be generated.
LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
// env contains additional environment variables you want to pass into a builder container
Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
// triggeredBy describes which triggers started the most recent update to the
// build configuration and contains information about those triggers.
TriggeredBy []BuildTriggerCause `json:"triggeredBy" protobuf:"bytes,8,rep,name=triggeredBy"`
}
// BinaryBuildRequestOptions are the options required to fully specify a binary build request
type BinaryBuildRequestOptions struct {
unversioned.TypeMeta `json:",inline"`
// metadata for BinaryBuildRequestOptions.
kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// asFile determines if the binary should be created as a file within the source rather than extracted as an archive
AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
// TODO: Improve map[string][]string conversion so we can handle nested objects
// revision.commit is the value identifying a specific commit
Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
// revision.message is the description of a specific commit
Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
// revision.authorName of the source control user
AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
// revision.authorEmail of the source control user
AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
// revision.committerName of the source control user
CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
// revision.committerEmail of the source control user
CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
}
// BuildLogOptions is the set of REST options for a build log
type BuildLogOptions struct {
unversioned.TypeMeta `json:",inline"`
// container for which to stream logs. Defaults to the only container if there is one container in the pod.
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
// follow if true indicates that the build log should be streamed until
// the build terminates.
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// previous returns previous build logs. Defaults to false.
Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
// sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
// sinceTime is an RFC3339 timestamp from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
// timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// of log output. Defaults to false.
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// tailLines, If set, is the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// limitBytes, If set, is the number of bytes to read from the server before terminating the
// log output. This may not display a complete final line of logging, and may return
// slightly more or slightly less than the specified limit.
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
// noWait if true causes the call to return immediately even if the build
// is not available yet. Otherwise the server will wait until the build has started.
// TODO: Fix the tag to 'noWait' in v2
NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"`
// version of the build for which to view logs.
Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"`
}
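As a hedged usage sketch, the fields above combine naturally when tailing a running build's log; the helper below only constructs the options object and leaves the actual REST call to whichever client consumes it, so the values shown are purely illustrative:

```go
// Illustrative sketch only: follow the last 50 lines of a build log with timestamps.
package example

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
)

func exampleLogOptions() *buildapi.BuildLogOptions {
	tail := int64(50)
	return &buildapi.BuildLogOptions{
		Follow:     true,  // keep streaming until the build terminates
		Timestamps: true,  // prefix each line with an RFC3339 timestamp
		TailLines:  &tail, // only show the most recent 50 lines
	}
}
```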
// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
type SecretSpec struct {
// secretSource is a reference to the secret
SecretSource kapi.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"`
// mountPath is the path at which to mount the secret
MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
}
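A minimal sketch of filling in a SecretSpec; the secret name and mount path are hypothetical values, and kapi is the same k8s.io/kubernetes/pkg/api alias used elsewhere in this file:

```go
// Illustrative sketch only: reference an existing secret and choose where to mount it.
package example

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
	kapi "k8s.io/kubernetes/pkg/api"
)

func exampleSecretSpec() buildapi.SecretSpec {
	return buildapi.SecretSpec{
		SecretSource: kapi.LocalObjectReference{Name: "registry-credentials"},
		MountPath:    "/var/run/secrets/registry",
	}
}
```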

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,109 @@
package client
import (
buildapi "github.com/openshift/origin/pkg/build/api"
osclient "github.com/openshift/origin/pkg/client"
kapi "k8s.io/kubernetes/pkg/api"
)
// BuildConfigGetter provides methods for getting BuildConfigs
type BuildConfigGetter interface {
Get(namespace, name string) (*buildapi.BuildConfig, error)
}
// BuildConfigUpdater provides methods for updating BuildConfigs
type BuildConfigUpdater interface {
Update(buildConfig *buildapi.BuildConfig) error
}
// OSClientBuildConfigClient delegates get and update operations to the OpenShift client interface
type OSClientBuildConfigClient struct {
Client osclient.Interface
}
// NewOSClientBuildConfigClient creates a new build config client that uses an openshift client to create and get BuildConfigs
func NewOSClientBuildConfigClient(client osclient.Interface) *OSClientBuildConfigClient {
return &OSClientBuildConfigClient{Client: client}
}
// Get returns a BuildConfig using the OpenShift client.
func (c OSClientBuildConfigClient) Get(namespace, name string) (*buildapi.BuildConfig, error) {
return c.Client.BuildConfigs(namespace).Get(name)
}
// Update updates a BuildConfig using the OpenShift client.
func (c OSClientBuildConfigClient) Update(buildConfig *buildapi.BuildConfig) error {
_, err := c.Client.BuildConfigs(buildConfig.Namespace).Update(buildConfig)
return err
}
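A hedged sketch of the get-modify-update round trip these wrappers enable; the pause annotation constant is the one referenced by IsPaused in the util package further down, while the function name and error handling are illustrative:

```go
// Illustrative sketch only: pause a BuildConfig so no new builds are instantiated from it.
package example

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
	buildclient "github.com/openshift/origin/pkg/build/client"
	osclient "github.com/openshift/origin/pkg/client"
)

func pauseBuildConfig(oc osclient.Interface, namespace, name string) error {
	c := buildclient.NewOSClientBuildConfigClient(oc)
	bc, err := c.Get(namespace, name)
	if err != nil {
		return err
	}
	if bc.Annotations == nil {
		bc.Annotations = map[string]string{}
	}
	bc.Annotations[buildapi.BuildConfigPausedAnnotation] = "true"
	return c.Update(bc)
}
```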
// BuildUpdater provides methods for updating existing Builds.
type BuildUpdater interface {
Update(namespace string, build *buildapi.Build) error
}
// BuildLister provides methods for listing the Builds.
type BuildLister interface {
List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error)
}
// OSClientBuildClient delegates build update and list operations to the OpenShift client interface
type OSClientBuildClient struct {
Client osclient.Interface
}
// NewOSClientBuildClient creates a new build client that uses an openshift client to update and list builds
func NewOSClientBuildClient(client osclient.Interface) *OSClientBuildClient {
return &OSClientBuildClient{Client: client}
}
// Update updates builds using the OpenShift client.
func (c OSClientBuildClient) Update(namespace string, build *buildapi.Build) error {
_, e := c.Client.Builds(namespace).Update(build)
return e
}
// List lists the builds using the OpenShift client.
func (c OSClientBuildClient) List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error) {
return c.Client.Builds(namespace).List(opts)
}
// BuildCloner provides methods for cloning builds
type BuildCloner interface {
Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error)
}
// OSClientBuildClonerClient is a build client that uses an openshift client to clone builds
type OSClientBuildClonerClient struct {
Client osclient.Interface
}
// NewOSClientBuildClonerClient creates a new build client that uses an openshift client to clone builds
func NewOSClientBuildClonerClient(client osclient.Interface) *OSClientBuildClonerClient {
return &OSClientBuildClonerClient{Client: client}
}
// Clone generates a new build for the given build request
func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {
return c.Client.Builds(namespace).Clone(request)
}
// BuildConfigInstantiator provides methods for instantiating builds from build configs
type BuildConfigInstantiator interface {
Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error)
}
// OSClientBuildConfigInstantiatorClient is a build client that uses an openshift client to instantiate builds
type OSClientBuildConfigInstantiatorClient struct {
Client osclient.Interface
}
// NewOSClientBuildConfigInstantiatorClient creates a new build client that uses an openshift client to create builds
func NewOSClientBuildConfigInstantiatorClient(client osclient.Interface) *OSClientBuildConfigInstantiatorClient {
return &OSClientBuildConfigInstantiatorClient{Client: client}
}
// Instantiate generates a new build for the given build config
func (c OSClientBuildConfigInstantiatorClient) Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {
return c.Client.BuildConfigs(namespace).Instantiate(request)
}
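Putting the instantiator wrapper to work is a one-liner once a BuildRequest is built; this sketch assumes the same import paths shown above, and the function name and config name are hypothetical:

```go
// Illustrative sketch only: trigger a new build from an existing BuildConfig.
package example

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
	buildclient "github.com/openshift/origin/pkg/build/client"
	osclient "github.com/openshift/origin/pkg/client"
	kapi "k8s.io/kubernetes/pkg/api"
)

func startBuild(oc osclient.Interface, namespace, configName string) (*buildapi.Build, error) {
	instantiator := buildclient.NewOSClientBuildConfigInstantiatorClient(oc)
	request := &buildapi.BuildRequest{
		ObjectMeta: kapi.ObjectMeta{Name: configName},
	}
	return instantiator.Instantiate(namespace, request)
}
```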

View File

@ -0,0 +1,3 @@
// Package util contains common functions that are used
// by the rest of the OpenShift build system.
package util

View File

@ -0,0 +1,152 @@
package util
import (
"fmt"
"strconv"
"strings"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels"
"github.com/golang/glog"
buildapi "github.com/openshift/origin/pkg/build/api"
buildclient "github.com/openshift/origin/pkg/build/client"
)
const (
// NoBuildLogsMessage reports that no build logs are available
NoBuildLogsMessage = "No logs are available."
)
// GetBuildName returns the name of the build stored in the pod's build annotation.
func GetBuildName(pod *kapi.Pod) string {
if pod == nil {
return ""
}
return pod.Annotations[buildapi.BuildAnnotation]
}
// GetInputReference returns the From ObjectReference associated with the
// BuildStrategy.
func GetInputReference(strategy buildapi.BuildStrategy) *kapi.ObjectReference {
switch {
case strategy.SourceStrategy != nil:
return &strategy.SourceStrategy.From
case strategy.DockerStrategy != nil:
return strategy.DockerStrategy.From
case strategy.CustomStrategy != nil:
return &strategy.CustomStrategy.From
default:
return nil
}
}
// IsBuildComplete returns whether the provided build is complete or not
func IsBuildComplete(build *buildapi.Build) bool {
return build.Status.Phase != buildapi.BuildPhaseRunning && build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew
}
// IsPaused returns true if the provided BuildConfig is paused and cannot be used to create a new Build
func IsPaused(bc *buildapi.BuildConfig) bool {
return strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true"
}
// BuildNumber returns the build's sequence number, parsed from its build number annotation.
func BuildNumber(build *buildapi.Build) (int64, error) {
annotations := build.GetAnnotations()
if stringNumber, ok := annotations[buildapi.BuildNumberAnnotation]; ok {
return strconv.ParseInt(stringNumber, 10, 64)
}
return 0, fmt.Errorf("build %s/%s does not have %s annotation", build.Namespace, build.Name, buildapi.BuildNumberAnnotation)
}
// BuildRunPolicy returns the scheduling policy for the build based on its
// run policy label.
func BuildRunPolicy(build *buildapi.Build) buildapi.BuildRunPolicy {
labels := build.GetLabels()
if value, found := labels[buildapi.BuildRunPolicyLabel]; found {
switch value {
case "Parallel":
return buildapi.BuildRunPolicyParallel
case "Serial":
return buildapi.BuildRunPolicySerial
case "SerialLatestOnly":
return buildapi.BuildRunPolicySerialLatestOnly
}
}
glog.V(5).Infof("Build %s/%s does not have start policy label set, using default (Serial)", build.Namespace, build.Name)
return buildapi.BuildRunPolicySerial
}
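A small, hedged example of the label-to-policy mapping above; the util import path is assumed from the package clause earlier in this change, and the build object is constructed inline purely for illustration:

```go
// Illustrative sketch only: a build labeled "Parallel" resolves to BuildRunPolicyParallel.
package main

import (
	"fmt"

	buildapi "github.com/openshift/origin/pkg/build/api"
	buildutil "github.com/openshift/origin/pkg/build/util"
)

func main() {
	build := &buildapi.Build{}
	build.Labels = map[string]string{buildapi.BuildRunPolicyLabel: "Parallel"}
	fmt.Println(buildutil.BuildRunPolicy(build)) // Parallel
}
```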
// BuildNameForConfigVersion returns the name of the version-th build
// for the config that has the provided name.
func BuildNameForConfigVersion(name string, version int) string {
return fmt.Sprintf("%s-%d", name, version)
}
// BuildConfigSelector returns a label Selector which can be used to find all
// builds for a BuildConfig.
func BuildConfigSelector(name string) labels.Selector {
return labels.Set{buildapi.BuildConfigLabel: buildapi.LabelValue(name)}.AsSelector()
}
// BuildConfigSelectorDeprecated returns a label Selector which can be used to find
// all builds for a BuildConfig that use the deprecated labels.
func BuildConfigSelectorDeprecated(name string) labels.Selector {
return labels.Set{buildapi.BuildConfigLabelDeprecated: name}.AsSelector()
}
type buildFilter func(buildapi.Build) bool
// BuildConfigBuilds returns a list of builds for the given build config.
// Optionally you can specify a filter function to select only builds that
// match your criteria.
func BuildConfigBuilds(c buildclient.BuildLister, namespace, name string, filterFunc buildFilter) (*buildapi.BuildList, error) {
result, err := c.List(namespace, kapi.ListOptions{
LabelSelector: BuildConfigSelector(name),
})
if err != nil {
return nil, err
}
if filterFunc == nil {
return result, nil
}
filteredList := &buildapi.BuildList{TypeMeta: result.TypeMeta, ListMeta: result.ListMeta}
for _, b := range result.Items {
if filterFunc(b) {
filteredList.Items = append(filteredList.Items, b)
}
}
return filteredList, nil
}
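Combined with IsBuildComplete above, BuildConfigBuilds makes a filtered listing short. In this sketch the lister can be any BuildLister implementation, for instance the OSClientBuildClient from the client package earlier in this diff; the util import path and function name are assumptions:

```go
// Illustrative sketch only: list just the finished builds that belong to one BuildConfig.
package example

import (
	buildapi "github.com/openshift/origin/pkg/build/api"
	buildclient "github.com/openshift/origin/pkg/build/client"
	buildutil "github.com/openshift/origin/pkg/build/util"
)

func completedBuilds(lister buildclient.BuildLister, namespace, configName string) (*buildapi.BuildList, error) {
	return buildutil.BuildConfigBuilds(lister, namespace, configName, func(b buildapi.Build) bool {
		return buildutil.IsBuildComplete(&b)
	})
}
```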
// ConfigNameForBuild returns the name of the build config from a
// build name.
func ConfigNameForBuild(build *buildapi.Build) string {
if build == nil {
return ""
}
if build.Annotations != nil {
if _, exists := build.Annotations[buildapi.BuildConfigAnnotation]; exists {
return build.Annotations[buildapi.BuildConfigAnnotation]
}
}
if _, exists := build.Labels[buildapi.BuildConfigLabel]; exists {
return build.Labels[buildapi.BuildConfigLabel]
}
return build.Labels[buildapi.BuildConfigLabelDeprecated]
}
// VersionForBuild returns the version from the provided build name.
// If no version can be found, 0 is returned to indicate no version.
func VersionForBuild(build *buildapi.Build) int {
if build == nil {
return 0
}
versionString := build.Annotations[buildapi.BuildNumberAnnotation]
version, err := strconv.Atoi(versionString)
if err != nil {
return 0
}
return version
}