Remove up and down cmd (#1297)

* Remove up and down cmd
Hang Yan 2020-08-01 16:01:34 +08:00 committed by GitHub
parent 6bf75ae11b
commit 47e6015f50
459 changed files with 14 additions and 105335 deletions

@@ -48,6 +48,8 @@ var (
ConvertPushImage bool
ConvertOpt kobject.ConvertOptions
ConvertYAMLIndent int
UpBuild string
)
var convertCmd = &cobra.Command{

@@ -1,66 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"strings"
"github.com/kubernetes/kompose/pkg/app"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/spf13/cobra"
)
// TODO: comment
var (
DownNamespace string
DownController string
DownOpt kobject.ConvertOptions
// DownServer allows the user to choose a different Kubernetes server URL
DownServer string
)
var downCmd = &cobra.Command{
Use: "down",
Short: "Delete instantiated services/deployments from kubernetes",
Long: `Delete instantiated services/deployments from kubernetes. (default "kubernetes")`,
PreRun: func(cmd *cobra.Command, args []string) {
// Create the Convert options.
DownOpt = kobject.ConvertOptions{
InputFiles: GlobalFiles,
Provider: GlobalProvider,
Namespace: DownNamespace,
Controller: strings.ToLower(DownController),
IsNamespaceFlag: cmd.Flags().Lookup("namespace").Changed,
Server: DownServer,
}
// Validate before doing anything else.
app.ValidateComposeFile(&DownOpt)
},
Run: func(cmd *cobra.Command, args []string) {
app.Down(DownOpt)
},
}
func init() {
downCmd.Flags().StringVar(&DownNamespace, "namespace", "default", "Specify Namespace to deploy your application")
downCmd.Flags().StringVar(&DownController, "controller", "", `Set the output controller ("deployment"|"daemonSet"|"replicationController")`)
downCmd.Flags().StringVar(&DownServer, "server", "https://127.0.0.1:6443", "kubernetes apiserver url")
RootCmd.AddCommand(downCmd)
}

139
cmd/up.go
@@ -1,139 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/kubernetes/kompose/pkg/app"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/spf13/cobra"
"strings"
)
// TODO: comment
var (
UpReplicas int
UpEmptyVols bool
StoreManifest bool
UpVolumes string
UpInsecureRepo bool
UpNamespace string
UpOpt kobject.ConvertOptions
UpBuild string
UpBuildBranch string
UpBuildRepo string
UpController string
// UpPushImage decides if we should push the docker image
UpPushImage bool
// UpServer allows the user to choose a different Kubernetes server URL
UpServer string
)
var upCmd = &cobra.Command{
Use: "up",
Short: "Deploy your Dockerized application to a container orchestrator.",
Long: `Deploy your Dockerized application to a container orchestrator. (default "kubernetes")`,
PreRun: func(cmd *cobra.Command, args []string) {
// Check that build-config wasn't passed in with --provider=kubernetes
if GlobalProvider == "kubernetes" && UpBuild == "build-config" {
log.Fatalf("build-config is not a valid --build parameter with provider Kubernetes")
}
// Create the Convert options.
UpOpt = kobject.ConvertOptions{
Build: UpBuild,
PushImage: UpPushImage,
StoreManifest: StoreManifest,
Replicas: UpReplicas,
InputFiles: GlobalFiles,
Provider: GlobalProvider,
EmptyVols: UpEmptyVols,
Volumes: UpVolumes,
Namespace: UpNamespace,
InsecureRepository: UpInsecureRepo,
BuildBranch: UpBuildBranch,
BuildRepo: UpBuildRepo,
Controller: strings.ToLower(UpController),
IsNamespaceFlag: cmd.Flags().Lookup("namespace").Changed,
Server: UpServer,
}
// Validate before doing anything else.
app.ValidateComposeFile(&UpOpt)
},
Run: func(cmd *cobra.Command, args []string) {
app.Up(UpOpt)
},
}
func init() {
upCmd.Flags().IntVar(&UpReplicas, "replicas", 1, "Specify the number of replicas generated")
upCmd.Flags().StringVar(&UpVolumes, "volumes", "persistentVolumeClaim", `Volumes to be generated ("persistentVolumeClaim"|"emptyDir"|"hostPath")`)
upCmd.Flags().BoolVar(&UpInsecureRepo, "insecure-repository", false, "Use an insecure Docker repository for OpenShift ImageStream")
upCmd.Flags().StringVar(&UpNamespace, "namespace", "default", "Specify Namespace to deploy your application")
upCmd.Flags().StringVar(&UpBuild, "build", "local", `Set the type of build ("local"|"build-config" (OpenShift only)|"none")`)
upCmd.Flags().StringVar(&UpBuildRepo, "build-repo", "", "Specify source repository for buildconfig (default remote origin)")
upCmd.Flags().StringVar(&UpBuildBranch, "build-branch", "", "Specify repository branch to use for buildconfig (default master)")
upCmd.Flags().BoolVar(&UpPushImage, "push-image", true, "If we should push the docker image we built")
upCmd.Flags().BoolVar(&StoreManifest, "store-manifest", false, "Store the generated manifest (default false)")
upCmd.Flags().StringVar(&UpController, "controller", "", `Set the output controller ("deployment"|"daemonSet"|"replicationController")`)
upCmd.Flags().StringVar(&UpServer, "server", "https://127.0.0.1:6443", "kubernetes apiserver url")
upCmd.Flags().MarkHidden("insecure-repository")
upCmd.Flags().MarkHidden("build-repo")
upCmd.Flags().MarkHidden("build-branch")
// Deprecated
upCmd.Flags().BoolVar(&UpEmptyVols, "emptyvols", false, "Use empty volumes. Do not generate PersistentVolumeClaim")
upCmd.Flags().MarkDeprecated("emptyvols", "emptyvols has been marked as deprecated. Use --volumes empty")
// In order to separate OpenShift-only and Kubernetes-only flags, a custom help page is created
customHelp := `Usage:{{if .Runnable}}
{{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
{{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}}
Aliases:
{{.NameAndAliases}}
{{end}}{{if .HasExample}}
Examples:
{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if .IsAvailableCommand}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}
OpenShift Flags:
--build-branch Specify repository branch to use for buildconfig (default is current branch name)
--build-repo Specify source repository for buildconfig (default is current branch's remote url)
--insecure-repository Specify to use insecure docker repository while generating Openshift image stream object
Flags:
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsHelpCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
// Set the help template + add the command to root
upCmd.SetUsageTemplate(customHelp)
RootCmd.AddCommand(upCmd)
}

@@ -54,20 +54,7 @@ INFO Kubernetes file "redis-master-deployment.yaml" created
INFO Kubernetes file "redis-slave-deployment.yaml" created
```
Alternatively, you can convert and deploy directly to Kubernetes with `kompose up`.
```sh
$ kompose up
We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.
INFO Successfully created Service: redis
INFO Successfully created Service: web
INFO Successfully created Deployment: redis
INFO Successfully created Deployment: web
Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods,pvc' for details.
```
Then you can use `kubectl apply` to create these resources in Kubernetes.
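For example, the replacement workflow is just two commands (a minimal sketch; the generated file names depend on the services defined in your Compose file):
```sh
# Generate Kubernetes manifests into the current directory
$ kompose convert
# Create the generated resources in the cluster
$ kubectl apply -f .
```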
__Access the newly deployed service:__
@@ -163,24 +150,7 @@ INFO OpenShift file "redis-slave-imagestream.yaml" created
Alternatively, you can convert and deploy directly to OpenShift with `kompose up --provider=openshift`.
```sh
$ kompose up --provider=openshift
INFO We are going to create OpenShift DeploymentConfigs, Services and PersistentVolumeClaims for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead.
INFO Deploying application in "myproject" namespace
INFO Successfully created Service: frontend
INFO Successfully created Service: redis-master
INFO Successfully created Service: redis-slave
INFO Successfully created DeploymentConfig: frontend
INFO Successfully created ImageStream: frontend
INFO Successfully created DeploymentConfig: redis-master
INFO Successfully created ImageStream: redis-master
INFO Successfully created DeploymentConfig: redis-slave
INFO Successfully created ImageStream: redis-slave
Your application has been deployed to OpenShift. You can run 'oc get dc,svc,is,pvc' for details.
```
Then you can use `kubectl apply` to create these resources in your OpenShift cluster.
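The OpenShift flow is analogous (a minimal sketch, assuming the `oc` CLI is logged in to your cluster and project):
```sh
# Generate OpenShift artifacts (DeploymentConfigs, Services, ImageStreams, ...)
$ kompose convert --provider=openshift
# Create the generated resources in the current project
$ oc apply -f .
```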
__Access the newly deployed service:__
@@ -299,26 +269,7 @@ INFO OpenShift file "redis-slave-deploymentconfig.yaml" created
INFO OpenShift file "redis-slave-imagestream.yaml" created
```
Alternatively, you can convert and deploy directly to OpenShift with `kompose up --provider=openshift`.
```sh
$ kompose up --provider=openshift
INFO We are going to create OpenShift DeploymentConfigs, Services and PersistentVolumeClaims for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead.
INFO Deploying application in "myproject" namespace
INFO Successfully created Service: frontend
INFO Successfully created Service: redis-master
INFO Successfully created Service: redis-slave
INFO Successfully created DeploymentConfig: frontend
INFO Successfully created ImageStream: frontend
INFO Successfully created DeploymentConfig: redis-master
INFO Successfully created ImageStream: redis-master
INFO Successfully created DeploymentConfig: redis-slave
INFO Successfully created ImageStream: redis-slave
Your application has been deployed to OpenShift. You can run 'oc get dc,svc,is,pvc' for details.
```
Then you can use `kubectl apply` to create these resources in OpenShift.
__Access the newly deployed service:__

@@ -16,21 +16,14 @@ Why do developers love it?
1. [Use an example docker-compose.yaml file](https://raw.githubusercontent.com/kubernetes/kompose/master/examples/docker-compose-v3.yaml) or your own
2. Run `kompose up`
3. Check your Kubernetes cluster for your newly deployed containers!
3. Run `kubectl apply` and check your Kubernetes cluster for your newly deployed containers!
```sh
$ wget https://raw.githubusercontent.com/kubernetes/kompose/master/examples/docker-compose-v3.yaml -O docker-compose.yaml
$ kompose up
We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application.
If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead.
$ kompose convert
INFO Successfully created Service: redis
INFO Successfully created Service: web
INFO Successfully created Deployment: redis
INFO Successfully created Deployment: web
Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods,pvc' for details.
$ kubectl apply -f *.yaml
$ kubectl get po
NAME READY STATUS RESTARTS AGE

@@ -253,67 +253,6 @@ func Convert(opt kobject.ConvertOptions) {
}
}
// Up brings up deployment, svc.
func Up(opt kobject.ConvertOptions) {
validateControllers(&opt)
// loader parses input from file into komposeObject.
l, err := loader.GetLoader(inputFormat)
if err != nil {
log.Fatal(err)
}
komposeObject := kobject.KomposeObject{
ServiceConfigs: make(map[string]kobject.ServiceConfig),
}
komposeObject, err = l.LoadFile(opt.InputFiles)
if err != nil {
log.Fatalf(err.Error())
}
// Get the transformer
t := getTransformer(opt)
//Submit objects to provider
errDeploy := t.Deploy(komposeObject, opt)
if errDeploy != nil {
log.Fatalf("Error while deploying application: %s", errDeploy)
}
}
// Down deletes all deployment, svc.
func Down(opt kobject.ConvertOptions) {
validateControllers(&opt)
// loader parses input from file into komposeObject.
l, err := loader.GetLoader(inputFormat)
if err != nil {
log.Fatal(err)
}
komposeObject := kobject.KomposeObject{
ServiceConfigs: make(map[string]kobject.ServiceConfig),
}
komposeObject, err = l.LoadFile(opt.InputFiles)
if err != nil {
log.Fatalf(err.Error())
}
// Get the transformer
t := getTransformer(opt)
//Remove deployed application
errUndeploy := t.Undeploy(komposeObject, opt)
if errUndeploy != nil {
for _, err = range errUndeploy {
log.Fatalf("Error while deleting application: %s", err)
}
}
}
// Convenience method to return the appropriate Transformer based on
// what provider we are using.
func getTransformer(opt kobject.ConvertOptions) transformer.Transformer {

@@ -18,22 +18,18 @@ package kubernetes
import (
"fmt"
"github.com/spf13/pflag"
"io/ioutil"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"os"
"path"
"reflect"
"regexp"
"strconv"
"time"
"github.com/fatih/structs"
"github.com/kubernetes/kompose/pkg/kobject"
"github.com/kubernetes/kompose/pkg/transformer"
buildapi "github.com/openshift/origin/pkg/build/api"
deployapi "github.com/openshift/origin/pkg/deploy/api"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path"
"reflect"
"regexp"
"strconv"
// install kubernetes api
_ "k8s.io/kubernetes/pkg/api/install"
@@ -44,10 +40,6 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -60,10 +52,6 @@ import (
"github.com/kubernetes/kompose/pkg/loader/compose"
"github.com/pkg/errors"
"github.com/spf13/cast"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/labels"
utilflag "k8s.io/kubernetes/pkg/util/flag"
)
// Kubernetes implements Transformer interface and represents Kubernetes transformer
@@ -1296,381 +1284,3 @@ func (k *Kubernetes) UpdateController(obj runtime.Object, updateTemplate func(*a
}
return nil
}
// DefaultClientConfig gets the default client config.
// This function is copied from the client library; we just override the apiserver URL
func (k *Kubernetes) DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
// use the standard defaults for this client command
// DEPRECATED: remove and replace with something more accurate
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
clusterDefaults := clientcmd.ClusterDefaults
clusterDefaults.Server = "https://127.0.0.1:6443"
if k.Opt.Server != "" {
clusterDefaults.Server = k.Opt.Server
}
overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clusterDefaults}
flagNames := clientcmd.RecommendedConfigOverrideFlags("")
// short flagnames are disabled by default. These are here for compatibility with existing scripts
flagNames.ClusterOverrideFlags.APIServer.ShortName = "s"
clientcmd.BindOverrideFlags(overrides, flags, flagNames)
clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)
return clientConfig
}
// GetKubernetesClient creates the k8s Client, returns k8s client and namespace
func (k *Kubernetes) GetKubernetesClient() (*client.Client, string, error) {
// generate a new client config
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags
oc := k.DefaultClientConfig(flags)
// initialize Kubernetes client
factory := cmdutil.NewFactory(oc)
clientConfig, err := factory.ClientConfig()
if err != nil {
return nil, "", err
}
client := client.NewOrDie(clientConfig)
// get namespace from config
namespace, _, err := factory.DefaultNamespace()
if err != nil {
return nil, "", err
}
return client, namespace, nil
}
// Deploy submits deployment and svc to k8s endpoint
func (k *Kubernetes) Deploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) error {
//Convert komposeObject
objects, err := k.Transform(komposeObject, opt)
if err != nil {
return errors.Wrap(err, "k.Transform failed")
}
if opt.StoreManifest {
log.Info("Store manifest to disk")
if err := PrintList(objects, opt); err != nil {
return errors.Wrap(err, "Store manifest failed")
}
}
pvcStr := " "
if !opt.EmptyVols || opt.Volumes != "emptyDir" {
pvcStr = " and PersistentVolumeClaims "
}
log.Info("We are going to create Kubernetes Deployments, Services" + pvcStr + "for your Dockerized application. " +
"If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead. \n")
client, ns, err := k.GetKubernetesClient()
namespace := ns
if opt.IsNamespaceFlag {
namespace = opt.Namespace
}
if err != nil {
return err
}
pvcCreatedSet := make(map[string]bool)
log.Infof("Deploying application in %q namespace", namespace)
for _, v := range objects {
switch t := v.(type) {
case *extensions.Deployment:
_, err := client.Deployments(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Deployment: %s", t.Name)
case *extensions.DaemonSet:
_, err := client.DaemonSets(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created DaemonSet: %s", t.Name)
case *api.ReplicationController:
_, err := client.ReplicationControllers(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created ReplicationController: %s", t.Name)
case *api.Service:
_, err := client.Services(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Service: %s", t.Name)
case *api.PersistentVolumeClaim:
if pvcCreatedSet[t.Name] {
log.Infof("Skip creation of PersistentVolumeClaim as it is already created: %s", t.Name)
} else {
_, err := client.PersistentVolumeClaims(namespace).Create(t)
if err != nil {
return err
}
pvcCreatedSet[t.Name] = true
storage := t.Spec.Resources.Requests[api.ResourceStorage]
capacity := storage.String()
log.Infof("Successfully created PersistentVolumeClaim: %s of size %s. If your cluster has dynamic storage provisioning, you don't have to do anything. Otherwise you have to create PersistentVolume to make PVC work", t.Name, capacity)
}
case *extensions.Ingress:
_, err := client.Ingress(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Ingress: %s", t.Name)
case *api.Pod:
_, err := client.Pods(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Pod: %s", t.Name)
case *api.ConfigMap:
_, err := client.ConfigMaps(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Config Map: %s", t.Name)
}
}
if !opt.EmptyVols || opt.Volumes != "emptyDir" {
pvcStr = ",pvc"
} else {
pvcStr = ""
}
fmt.Println("\nYour application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods" + pvcStr + "' for details.")
return nil
}
// Undeploy deletes deployed objects from Kubernetes cluster
func (k *Kubernetes) Undeploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) []error {
var errorList []error
//Convert komposeObject
objects, err := k.Transform(komposeObject, opt)
if err != nil {
errorList = append(errorList, err)
return errorList
}
client, ns, err := k.GetKubernetesClient()
namespace := ns
if opt.IsNamespaceFlag {
namespace = opt.Namespace
}
if err != nil {
errorList = append(errorList, err)
return errorList
}
log.Infof("Deleting application in %q namespace", namespace)
for _, v := range objects {
label := labels.SelectorFromSet(labels.Set(map[string]string{transformer.Selector: v.(meta.Object).GetName()}))
options := api.ListOptions{LabelSelector: label}
komposeLabel := map[string]string{transformer.Selector: v.(meta.Object).GetName()}
switch t := v.(type) {
case *extensions.Deployment:
//delete deployment
deployment, err := client.Deployments(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range deployment.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpDeployment, err := kubectl.ReaperFor(extensions.Kind("Deployment"), client)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpDeployment.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Deployment: %s", t.Name)
}
}
case *extensions.DaemonSet:
//delete deployment
daemonset, err := client.DaemonSets(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range daemonset.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpDaemonset, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), client)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpDaemonset.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted DaemonSet: %s", t.Name)
}
}
case *api.ReplicationController:
//delete deployment
replicationController, err := client.ReplicationControllers(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range replicationController.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpReplicationController, err := kubectl.ReaperFor(api.Kind("ReplicationController"), client)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpReplicationController.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted ReplicationController: %s", t.Name)
}
}
case *api.Service:
//delete svc
svc, err := client.Services(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range svc.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpService, err := kubectl.ReaperFor(api.Kind("Service"), client)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpService.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Service: %s", t.Name)
}
}
case *api.PersistentVolumeClaim:
// delete pvc
pvc, err := client.PersistentVolumeClaims(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range pvc.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = client.PersistentVolumeClaims(namespace).Delete(t.Name)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted PersistentVolumeClaim: %s", t.Name)
}
}
case *extensions.Ingress:
// delete ingress
ingDeleteOptions := &api.DeleteOptions{
TypeMeta: unversioned.TypeMeta{
Kind: "Ingress",
APIVersion: "extensions/v1beta1",
},
}
ingress, err := client.Ingress(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range ingress.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = client.Ingress(namespace).Delete(t.Name, ingDeleteOptions)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Ingress: %s", t.Name)
}
}
case *api.Pod:
//delete pod
pod, err := client.Pods(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
}
for _, l := range pod.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpPod, err := kubectl.ReaperFor(api.Kind("Pod"), client)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpPod.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Pod: %s", t.Name)
}
}
case *api.ConfigMap:
// delete ConfigMap
configMap, err := client.ConfigMaps(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range configMap.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = client.ConfigMaps(namespace).Delete(t.Name)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted ConfigMap: %s", t.Name)
}
}
}
}
return errorList
}

@@ -27,29 +27,16 @@ import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/runtime"
oclient "github.com/openshift/origin/pkg/client"
ocliconfig "github.com/openshift/origin/pkg/cmd/cli/config"
"time"
"reflect"
"sort"
"github.com/kubernetes/kompose/pkg/transformer"
buildapi "github.com/openshift/origin/pkg/build/api"
buildconfigreaper "github.com/openshift/origin/pkg/build/cmd"
deployapi "github.com/openshift/origin/pkg/deploy/api"
deploymentconfigreaper "github.com/openshift/origin/pkg/deploy/cmd"
imageapi "github.com/openshift/origin/pkg/image/api"
routeapi "github.com/openshift/origin/pkg/route/api"
"github.com/pkg/errors"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr"
)
@@ -439,281 +426,3 @@ func (o *OpenShift) Transform(komposeObject kobject.KomposeObject, opt kobject.C
return allobjects, nil
}
// Create OpenShift client, returns OpenShift client
func (o *OpenShift) getOpenShiftClient() (*oclient.Client, error) {
// initialize OpenShift Client
loadingRules := ocliconfig.NewOpenShiftClientConfigLoadingRules()
overrides := &clientcmd.ConfigOverrides{}
oclientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides).ClientConfig()
if err != nil {
return nil, err
}
oc := oclient.NewOrDie(oclientConfig)
return oc, nil
}
// Deploy transforms and deploys kobject to OpenShift
func (o *OpenShift) Deploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) error {
//Convert komposeObject
objects, err := o.Transform(komposeObject, opt)
if err != nil {
return errors.Wrap(err, "o.Transform failed")
}
pvcStr := " "
if !opt.EmptyVols || opt.Volumes != "emptyDir" {
pvcStr = " and PersistentVolumeClaims "
}
log.Info("We are going to create OpenShift DeploymentConfigs, Services" + pvcStr + "for your Dockerized application. \n" +
"If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead. \n")
oc, err := o.getOpenShiftClient()
if err != nil {
return err
}
kclient, ns, err := o.GetKubernetesClient()
if err != nil {
return err
}
namespace := ns
if opt.IsNamespaceFlag {
namespace = opt.Namespace
}
log.Infof("Deploying application in %q namespace", namespace)
for _, v := range objects {
switch t := v.(type) {
case *imageapi.ImageStream:
_, err := oc.ImageStreams(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created ImageStream: %s", t.Name)
case *buildapi.BuildConfig:
_, err := oc.BuildConfigs(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created BuildConfig: %s", t.Name)
case *deployapi.DeploymentConfig:
_, err := oc.DeploymentConfigs(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created DeploymentConfig: %s", t.Name)
case *kapi.Service:
_, err := kclient.Services(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Service: %s", t.Name)
case *kapi.PersistentVolumeClaim:
_, err := kclient.PersistentVolumeClaims(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created PersistentVolumeClaim: %s of size %s. If your cluster has dynamic storage provisioning, you don't have to do anything. Otherwise you have to create PersistentVolume to make PVC work", t.Name, kubernetes.PVCRequestSize)
case *routeapi.Route:
_, err := oc.Routes(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Route: %s", t.Name)
case *kapi.Pod:
_, err := kclient.Pods(namespace).Create(t)
if err != nil {
return err
}
log.Infof("Successfully created Pod: %s", t.Name)
}
}
if !opt.EmptyVols || opt.Volumes != "emptyDir" {
pvcStr = ",pvc"
} else {
pvcStr = ""
}
fmt.Println("\nYour application has been deployed to OpenShift. You can run 'oc get dc,svc,is" + pvcStr + "' for details.")
return nil
}
//Undeploy removes deployed artifacts from OpenShift cluster
func (o *OpenShift) Undeploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) []error {
var errorList []error
//Convert komposeObject
objects, err := o.Transform(komposeObject, opt)
if err != nil {
errorList = append(errorList, err)
return errorList
}
oc, err := o.getOpenShiftClient()
if err != nil {
errorList = append(errorList, err)
return errorList
}
kclient, ns, err := o.GetKubernetesClient()
if err != nil {
errorList = append(errorList, err)
return errorList
}
namespace := ns
if opt.IsNamespaceFlag {
namespace = opt.Namespace
}
log.Infof("Deleting application in %q namespace", namespace)
for _, v := range objects {
label := labels.SelectorFromSet(labels.Set(map[string]string{transformer.Selector: v.(meta.Object).GetName()}))
options := kapi.ListOptions{LabelSelector: label}
komposeLabel := map[string]string{transformer.Selector: v.(meta.Object).GetName()}
switch t := v.(type) {
case *imageapi.ImageStream:
//delete imageStream
imageStream, err := oc.ImageStreams(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range imageStream.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = oc.ImageStreams(namespace).Delete(t.Name)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted ImageStream: %s", t.Name)
}
}
case *buildapi.BuildConfig:
buildConfig, err := oc.BuildConfigs(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range buildConfig.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
bcreaper := buildconfigreaper.NewBuildConfigReaper(oc)
err := bcreaper.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted BuildConfig: %s", t.Name)
}
}
case *deployapi.DeploymentConfig:
// delete deploymentConfig
deploymentConfig, err := oc.DeploymentConfigs(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range deploymentConfig.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
dcreaper := deploymentconfigreaper.NewDeploymentConfigReaper(oc, kclient)
err := dcreaper.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted DeploymentConfig: %s", t.Name)
}
}
case *kapi.Service:
//delete svc
svc, err := kclient.Services(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range svc.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpService, err := kubectl.ReaperFor(kapi.Kind("Service"), kclient)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpService.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Service: %s", t.Name)
}
}
case *kapi.PersistentVolumeClaim:
// delete pvc
pvc, err := kclient.PersistentVolumeClaims(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range pvc.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = kclient.PersistentVolumeClaims(namespace).Delete(t.Name)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted PersistentVolumeClaim: %s", t.Name)
}
}
case *routeapi.Route:
// delete route
route, err := oc.Routes(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range route.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
err = oc.Routes(namespace).Delete(t.Name)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Route: %s", t.Name)
}
}
case *kapi.Pod:
//delete pods
pod, err := kclient.Pods(namespace).List(options)
if err != nil {
errorList = append(errorList, err)
break
}
for _, l := range pod.Items {
if reflect.DeepEqual(l.Labels, komposeLabel) {
rpPod, err := kubectl.ReaperFor(kapi.Kind("Pod"), kclient)
if err != nil {
errorList = append(errorList, err)
break
}
//FIXME: gracePeriod is nil
err = rpPod.Stop(namespace, t.Name, TIMEOUT*time.Second, nil)
if err != nil {
errorList = append(errorList, err)
break
}
log.Infof("Successfully deleted Pod: %s", t.Name)
}
}
}
}
return errorList
}

@@ -25,8 +25,4 @@ import (
type Transformer interface {
// Transform converts KomposeObject to transformer specific objects.
Transform(kobject.KomposeObject, kobject.ConvertOptions) ([]runtime.Object, error)
// Deploy deploys KomposeObject to provider
Deploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) error
// Undeploy deletes/undeploys KomposeObject from provider
Undeploy(komposeObject kobject.KomposeObject, opt kobject.ConvertOptions) []error
}

202
vendor/github.com/coreos/etcd/LICENSE generated vendored
@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

@@ -1,820 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: auth.proto
// DO NOT EDIT!
/*
Package authpb is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
User
Permission
Role
*/
package authpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
io "io"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Permission_Type int32
const (
READ Permission_Type = 0
WRITE Permission_Type = 1
READWRITE Permission_Type = 2
)
var Permission_Type_name = map[int32]string{
0: "READ",
1: "WRITE",
2: "READWRITE",
}
var Permission_Type_value = map[string]int32{
"READ": 0,
"WRITE": 1,
"READWRITE": 2,
}
func (x Permission_Type) String() string {
return proto.EnumName(Permission_Type_name, int32(x))
}
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
// User is a single entry in the bucket authUsers
type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
}
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
// Permission is a single entity
type Permission struct {
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}
func (m *Permission) Reset() { *m = Permission{} }
func (m *Permission) String() string { return proto.CompactTextString(m) }
func (*Permission) ProtoMessage() {}
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
// Role is a single entry in the bucket authRoles
type Role struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
}
func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
func init() {
proto.RegisterType((*User)(nil), "authpb.User")
proto.RegisterType((*Permission)(nil), "authpb.Permission")
proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
}
func (m *User) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *User) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.Password) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password)
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
data[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Permission) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
data[i] = 0x8
i++
i = encodeVarintAuth(data, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
data[i] = 0x1a
i++
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
i += copy(data[i:], m.RangeEnd)
}
return i, nil
}
func (m *Role) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Role) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeFixed64Auth(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Auth(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintAuth(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *User) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.Password)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
l = len(s)
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func (m *Permission) Size() (n int) {
var l int
_ = l
if m.PermType != 0 {
n += 1 + sovAuth(uint64(m.PermType))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.RangeEnd)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func (m *Role) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.KeyPermission) > 0 {
for _, e := range m.KeyPermission {
l = e.Size()
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func sovAuth(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAuth(x uint64) (n int) {
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *User) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: User: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
if m.Password == nil {
m.Password = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Permission) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
}
m.PermType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Role) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Role: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
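// skipAuth (below) is a generated helper: given the bytes that follow an unknown
// field, it works out how many bytes that field occupies, based on its wire type,
// so Unmarshal can skip over fields it does not recognize.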
func skipAuth(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAuth
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAuth(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
}

View File

@ -1,237 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"golang.org/x/net/context"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewAuthRoleAPI(c Client) AuthRoleAPI {
return &httpAuthRoleAPI{
client: c,
}
}
type AuthRoleAPI interface {
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
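A minimal usage sketch, not part of the original file; the function name, role name, and key prefix are illustrative assumptions:
// exampleGrantRole creates a role and grants it read/write access under a prefix.
func exampleGrantRole(ctx context.Context, c Client) error {
	roleAPI := NewAuthRoleAPI(c)
	if err := roleAPI.AddRole(ctx, "ops"); err != nil {
		return err
	}
	_, err := roleAPI.GrantRoleKV(ctx, "ops", []string{"/config"}, ReadWritePermission)
	return err
}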
type httpAuthRoleAPI struct {
client httpClient
}
type authRoleAPIAction struct {
verb string
name string
role *Role
}
type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []Role `json:"roles"`
}
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
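// Illustrative example (not in the original file):
// buildRWPermission([]string{"/foo"}, ReadWritePermission) returns
// rwPermission{Read: []string{"/foo"}, Write: []string{"/foo"}}, which
// GrantRoleKV and RevokeRoleKV then embed under the role's "kv" permissions.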
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil
}

View File

@ -1,320 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
)
var (
defaultV2AuthPrefix = "/v2/auth"
)
type User struct {
User string `json:"user"`
Password string `json:"password,omitempty"`
Roles []string `json:"roles"`
Grant []string `json:"grant,omitempty"`
Revoke []string `json:"revoke,omitempty"`
}
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
return &ep
}
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
return &ep
}
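// Illustrative examples (not in the original file) of the URLs this helper builds:
//   v2AuthURL(ep, "users", "alice") -> <endpoint>/v2/auth/users/alice
//   v2AuthURL(ep, "enable", "")     -> <endpoint>/v2/auth/enable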
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
// interact with etcd's general auth features.
func NewAuthAPI(c Client) AuthAPI {
return &httpAuthAPI{
client: c,
}
}
type AuthAPI interface {
// Enable auth.
Enable(ctx context.Context) error
// Disable auth.
Disable(ctx context.Context) error
}
type httpAuthAPI struct {
client httpClient
}
func (s *httpAuthAPI) Enable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"PUT"})
}
func (s *httpAuthAPI) Disable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
}
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
resp, body, err := s.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
type authAPIAction struct {
verb string
}
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "enable", "")
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
type authError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e authError) Error() string {
return e.Message
}
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
// interact with etcd's user creation and modification features.
func NewAuthUserAPI(c Client) AuthUserAPI {
return &httpAuthUserAPI{
client: c,
}
}
type AuthUserAPI interface {
// AddUser adds a user.
AddUser(ctx context.Context, username string, password string) error
// RemoveUser removes a user.
RemoveUser(ctx context.Context, username string) error
// GetUser retrieves user details.
GetUser(ctx context.Context, username string) (*User, error)
// GrantUser grants a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// RevokeUser revokes some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// ChangePassword changes the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// ListUsers lists the users.
ListUsers(ctx context.Context) ([]string, error)
}
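A minimal usage sketch, not part of the original file; the function name, username, password, and role are illustrative assumptions:
// exampleAddUser creates a user and grants it an existing role.
func exampleAddUser(ctx context.Context, c Client) error {
	userAPI := NewAuthUserAPI(c)
	if err := userAPI.AddUser(ctx, "alice", "s3cret"); err != nil {
		return err
	}
	_, err := userAPI.GrantUser(ctx, "alice", []string{"ops"})
	return err
}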
type httpAuthUserAPI struct {
client httpClient
}
type authUserAPIAction struct {
verb string
username string
user *User
}
type authUserAPIList struct{}
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", l.username)
if l.user == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.user)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []userListEntry `json:"users"`
}
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
ret := make([]string, 0, len(userList.Users))
for _, u := range userList.Users {
ret = append(ret, u.User)
}
return ret, nil
}
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
user := &User{
User: username,
Password: password,
}
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "DELETE",
username: username,
})
}
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &authUserAPIAction{
verb: "GET",
username: username,
})
}
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Grant: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Revoke: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
user := &User{
User: username,
Password: password,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
if err = json.Unmarshal(body, &user); err != nil {
var userR UserRoles
if urerr := json.Unmarshal(body, &userR); urerr != nil {
return nil, err
}
user.User = userR.User
for _, r := range userR.Roles {
user.Roles = append(user.Roles, r.Role)
}
}
return &user, nil
}

View File

@ -1,18 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// borrowed from golang/net/context/ctxhttp/cancelreq.go
package client
import "net/http"
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

View File

@ -1,609 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"sort"
"strconv"
"sync"
"time"
"golang.org/x/net/context"
)
var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
// As the name implies, the client object will pick a node from the members
// of the cluster in a random fashion. If the cluster has three members, A, B,
// and C, the client picks any node from its three members as its request
// destination.
EndpointSelectionRandom EndpointSelectionMode = iota
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
// requests are sent directly to the cluster leader. This reduces
// forwarding roundtrips compared to making requests to etcd followers
// who then forward them to the cluster leader. In the event of a leader
// failure, however, clients configured this way cannot prioritize among
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
// maintain its knowledge of current cluster state.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
// example, a three-node cluster could be provided like so:
//
// Endpoints: []string{
// "http://node1.example.com:2379",
// "http://node2.example.com:2379",
// "http://node3.example.com:2379",
// }
//
// If multiple endpoints are provided, the Client will attempt to
// use them all in the event that one or more of them are unusable.
//
// If Client.Sync is ever called, the Client may cache an alternate
// set of endpoints to continue operation.
Endpoints []string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
// CheckRedirect specifies the policy for handling HTTP redirects.
// If CheckRedirect is not nil, the Client calls it before
// following an HTTP redirect. The sole argument is the number of
// requests that have already been made. If CheckRedirect returns
// an error, Client.Do will not make any further requests and return
// the error back to the caller.
//
// If CheckRedirect is nil, the Client uses its default policy,
// which is to stop after 10 consecutive requests.
CheckRedirect CheckRedirectFunc
// Username specifies the user credential to add as an authorization header
Username string
// Password is the password for the specified user to add as an authorization header
// to the request.
Password string
// HeaderTimeoutPerRequest specifies the time limit to wait for response
// header in a single request made by the Client. The timeout includes
// connection time, any redirects, and header wait time.
//
// For non-watch GET requests, the server returns the response body immediately.
// For PUT/POST/DELETE requests, the server will attempt to commit the request
// before responding, which is expected to take `100ms + 2 * RTT`.
// For watch requests, the server returns the header immediately to notify the
// Client that the watch has started. But if the server is behind some kind of
// proxy, the response header may be cached at the proxy, and the Client cannot
// rely on this behavior.
//
// In particular, wait requests ignore this timeout.
//
// One API call may send multiple requests to different etcd servers until it
// succeeds. Use context of the API to specify the overall timeout.
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode is an EndpointSelectionMode enum that specifies the
// policy for choosing the etcd cluster node to which requests are sent.
SelectionMode EndpointSelectionMode
}
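A minimal sketch of building a client from this Config, not part of the original file; the endpoint URL and timeout are placeholder assumptions:
// exampleNewClient builds a Client from a Config with an explicit per-request header timeout.
func exampleNewClient() (Client, error) {
	cfg := Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
		SelectionMode:           EndpointSelectionRandom,
	}
	return New(cfg)
}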
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
func (cfg *Config) checkRedirect() CheckRedirectFunc {
if cfg.CheckRedirect == nil {
return DefaultCheckRedirect
}
return cfg.CheckRedirect
}
// CancelableTransport mimics net/http.Transport, but requires that
// the object also support request cancellation.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
type CheckRedirectFunc func(via int) error
// DefaultCheckRedirect follows up to 10 redirects, but no more.
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
if via > 10 {
return ErrTooManyRedirects
}
return nil
}
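A sketch of a custom policy, not part of the original file; the variable name and redirect limit are illustrative assumptions:
// exampleStrictRedirects allows at most 3 redirects and is suitable for
// assignment to Config.CheckRedirect.
var exampleStrictRedirects CheckRedirectFunc = func(via int) error {
	if via > 3 {
		return ErrTooManyRedirects
	}
	return nil
}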
type Client interface {
// Sync updates the internal cache of the etcd cluster's membership.
Sync(context.Context) error
// AutoSync periodically calls Sync() at the given interval.
// The recommended sync interval is 10 seconds to 1 minute; this keeps the
// overhead on the server low while still letting the client catch up with
// cluster membership changes in time.
//
// Example usage:
//
// for {
// err := client.AutoSync(ctx, 10*time.Second)
// if err == context.DeadlineExceeded || err == context.Canceled {
// break
// }
// log.Print(err)
// }
AutoSync(context.Context, time.Duration) error
// Endpoints returns a copy of the current set of API endpoints used
// by Client to resolve HTTP requests. If Sync has ever been called,
// this may differ from the initial Endpoints provided in the Config.
Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned.
SetEndpoints(eps []string) error
httpClient
}
func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
username: cfg.Username,
password: cfg.Password,
}
}
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err
}
return c, nil
}
type httpClient interface {
Do(context.Context, httpAction) (*http.Response, []byte, error)
}
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
return func(ep url.URL) httpClient {
return &redirectFollowingHTTPClient{
checkRedirect: cr,
client: &simpleHTTPClient{
transport: tr,
endpoint: ep,
headerTimeout: headerTimeout,
},
}
}
}
type credentials struct {
username string
password string
}
type httpClientFactory func(url.URL) httpClient
type httpAction interface {
HTTPRequest(url.URL) *http.Request
}
type httpClusterClient struct {
clientFactory httpClientFactory
endpoints []url.URL
pinned int
credentials *credentials
sync.RWMutex
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
if err != nil {
return "", err
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
if len(eps) == 0 {
return ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return err
}
neps[i] = *u
}
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If the leader endpoint is not among the configured endpoints, just keep c.pinned = 0.
// Requests will then be forwarded between a follower and the leader, which still works.
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
return nil
}
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
action := act
c.RLock()
leps := len(c.endpoints)
eps := make([]url.URL, leps)
n := copy(eps, c.endpoints)
pinned := c.pinned
if c.credentials != nil {
action = &authedAction{
act: act,
credentials: *c.credentials,
}
}
c.RUnlock()
if leps == 0 {
return nil, nil, ErrNoEndpoints
}
if leps != n {
return nil, nil, errors.New("unable to pick endpoint: copy failed")
}
var resp *http.Response
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
hc := c.clientFactory(eps[k])
resp, body, err = hc.Do(ctx, action)
if err != nil {
cerr.Errors = append(cerr.Errors, err)
if err == ctx.Err() {
return nil, nil, ctx.Err()
}
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue
}
if k != pinned {
c.Lock()
c.pinned = k
c.Unlock()
}
return resp, body, nil
}
return nil, nil, cerr
}
func (c *httpClusterClient) Endpoints() []string {
c.RLock()
defer c.RUnlock()
eps := make([]string, len(c.endpoints))
for i, ep := range c.endpoints {
eps[i] = ep.String()
}
return eps
}
func (c *httpClusterClient) Sync(ctx context.Context) error {
mAPI := NewMembersAPI(c)
ms, err := mAPI.List(ctx)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
var eps []string
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
sort.Sort(sort.StringSlice(eps))
ceps := make([]string, len(c.endpoints))
for i, cep := range c.endpoints {
ceps[i] = cep.String()
}
sort.Sort(sort.StringSlice(ceps))
// fast path if no change happens
// this helps the client keep its pinned endpoint when the cluster has not changed
if reflect.DeepEqual(eps, ceps) {
return nil
}
return c.SetEndpoints(eps)
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
err := c.Sync(ctx)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
type roundTripResponse struct {
resp *http.Response
err error
}
type simpleHTTPClient struct {
transport CancelableTransport
endpoint url.URL
headerTimeout time.Duration
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
req := act.HTTPRequest(c.endpoint)
if err := printcURL(req); err != nil {
return nil, nil, err
}
isWait := false
if req != nil && req.URL != nil {
ws := req.URL.Query().Get("wait")
if len(ws) != 0 {
var err error
isWait, err = strconv.ParseBool(ws)
if err != nil {
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
}
}
}
var hctx context.Context
var hcancel context.CancelFunc
if !isWait && c.headerTimeout > 0 {
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
} else {
hctx, hcancel = context.WithCancel(ctx)
}
defer hcancel()
reqcancel := requestCanceler(c.transport, req)
rtchan := make(chan roundTripResponse, 1)
go func() {
resp, err := c.transport.RoundTrip(req)
rtchan <- roundTripResponse{resp: resp, err: err}
close(rtchan)
}()
var resp *http.Response
var err error
select {
case rtresp := <-rtchan:
resp, err = rtresp.resp, rtresp.err
case <-hctx.Done():
// cancel and wait for request to actually exit before continuing
reqcancel()
rtresp := <-rtchan
resp = rtresp.resp
switch {
case ctx.Err() != nil:
err = ctx.Err()
case hctx.Err() != nil:
err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
default:
panic("failed to get error from context")
}
}
// always check for resp nil-ness to deal with possible
// race conditions between channels above
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
done <- struct{}{}
}()
select {
case <-ctx.Done():
resp.Body.Close()
<-done
return nil, nil, ctx.Err()
case <-done:
}
return resp, body, err
}
type authedAction struct {
act httpAction
credentials credentials
}
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
r := a.act.HTTPRequest(url)
r.SetBasicAuth(a.credentials.username, a.credentials.password)
return r
}
type redirectFollowingHTTPClient struct {
client httpClient
checkRedirect CheckRedirectFunc
}
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
next := act
for i := 0; i < 100; i++ {
if i > 0 {
if err := r.checkRedirect(i); err != nil {
return nil, nil, err
}
}
resp, body, err := r.client.Do(ctx, next)
if err != nil {
return nil, nil, err
}
if resp.StatusCode/100 == 3 {
hdr := resp.Header.Get("Location")
if hdr == "" {
return nil, nil, fmt.Errorf("Location header not set")
}
loc, err := url.Parse(hdr)
if err != nil {
return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
}
next = &redirectedHTTPAction{
action: act,
location: *loc,
}
continue
}
return resp, body, nil
}
return nil, nil, errTooManyRedirectChecks
}
type redirectedHTTPAction struct {
action httpAction
location url.URL
}
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
orig := r.action.HTTPRequest(ep)
orig.URL = &r.location
return orig
}
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
p := r.Perm(len(eps))
neps := make([]url.URL, len(eps))
for i, k := range p {
neps[i] = eps[k]
}
return neps
}

View File

@ -1,37 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import "fmt"
type ClusterError struct {
Errors []error
}
func (ce *ClusterError) Error() string {
s := ErrClusterUnavailable.Error()
for i, e := range ce.Errors {
s += fmt.Sprintf("; error #%d: %s\n", i, e)
}
return s
}
func (ce *ClusterError) Detail() string {
s := ""
for i, e := range ce.Errors {
s += fmt.Sprintf("error #%d: %s\n", i, e)
}
return s
}

View File

@ -1,70 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
)
var (
cURLDebug = false
)
func EnablecURLDebug() {
cURLDebug = true
}
func DisablecURLDebug() {
cURLDebug = false
}
// printcURL prints the cURL equivalent request to stderr.
// It returns an error if the body of the request cannot
// be read.
// The caller MUST cancel the request if there is an error.
func printcURL(req *http.Request) error {
if !cURLDebug {
return nil
}
var (
command string
b []byte
err error
)
if req.URL != nil {
command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
}
if req.Body != nil {
b, err = ioutil.ReadAll(req.Body)
if err != nil {
return err
}
command += fmt.Sprintf(" -d %q", string(b))
}
fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
// reset body
body := bytes.NewBuffer(b)
req.Body = ioutil.NopCloser(body)
return nil
}
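// Illustrative note (not in the original file): call EnablecURLDebug() before issuing
// requests to have each request's cURL equivalent printed to stderr via printcURL;
// DisablecURLDebug() turns the tracing back off.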

View File

@ -1,21 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}
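A minimal sketch of an implementation satisfying this interface, not part of the original package; the type name and fixed endpoint list are illustrative assumptions:
// staticDiscoverer is a trivial Discoverer that ignores the domain and
// always returns a fixed set of endpoints.
type staticDiscoverer struct {
	endpoints []string
}

func (d staticDiscoverer) Discover(domain string) ([]string, error) {
	return d.endpoints, nil
}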

View File

@ -1,73 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package client provides bindings for the etcd APIs.
Create a Config and exchange it for a Client:
import (
"net/http"
"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
cfg := client.Config{
Endpoints: []string{"http://127.0.0.1:2379"},
Transport: DefaultTransport,
}
c, err := client.New(cfg)
if err != nil {
// handle error
}
Clients are safe for concurrent use by multiple goroutines.
Create a KeysAPI using the Client, then use it to interact with etcd:
kAPI := client.NewKeysAPI(c)
// create a new key /foo with the value "bar"
_, err = kAPI.Create(context.Background(), "/foo", "bar")
if err != nil {
// handle error
}
// delete the newly created key only if the value is still "bar"
_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
if err != nil {
// handle error
}
Use a custom context to set timeouts on your operations:
import "time"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {
// request took longer than 5s
} else {
// handle error
}
}
*/
package client

File diff suppressed because it is too large.

View File

@ -1,677 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/coreos/etcd/pkg/pathutil"
"github.com/ugorji/go/codec"
"golang.org/x/net/context"
)
const (
ErrorCodeKeyNotFound = 100
ErrorCodeTestFailed = 101
ErrorCodeNotFile = 102
ErrorCodeNotDir = 104
ErrorCodeNodeExist = 105
ErrorCodeRootROnly = 107
ErrorCodeDirNotEmpty = 108
ErrorCodeUnauthorized = 110
ErrorCodePrevValueRequired = 201
ErrorCodeTTLNaN = 202
ErrorCodeIndexNaN = 203
ErrorCodeInvalidField = 209
ErrorCodeInvalidForm = 210
ErrorCodeRaftInternal = 300
ErrorCodeLeaderElect = 301
ErrorCodeWatcherCleared = 400
ErrorCodeEventIndexCleared = 401
)
type Error struct {
Code int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause"`
Index uint64 `json:"index"`
}
func (e Error) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
}
var (
ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
ErrEmptyBody = errors.New("client: response body is empty")
)
// PrevExistType is used to define an existence condition when setting
// or deleting Nodes.
type PrevExistType string
const (
PrevIgnore = PrevExistType("")
PrevExist = PrevExistType("true")
PrevNoExist = PrevExistType("false")
)
var (
defaultV2KeysPrefix = "/v2/keys"
)
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
func NewKeysAPI(c Client) KeysAPI {
return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
}
// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
// to provide a custom base URL path. This should only be used in
// very rare cases.
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
return &httpKeysAPI{
client: c,
prefix: p,
}
}
type KeysAPI interface {
// Get retrieves a set of Nodes from etcd
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
// Set assigns a new value to a Node identified by a given key. The caller
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
// then value is ignored.
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
// Delete removes a Node identified by the given key, optionally destroying
// all of its children as well. The caller may define a set of required
// conditions in a DeleteOptions object.
Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
// Create is an alias for Set w/ PrevExist=false
Create(ctx context.Context, key, value string) (*Response, error)
// CreateInOrder is used to atomically create in-order keys within the given directory.
CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
// Update is an alias for Set w/ PrevExist=true
Update(ctx context.Context, key, value string) (*Response, error)
// Watcher builds a new Watcher targeted at a specific Node identified
// by the given key. The Watcher may be configured at creation time
// through a WatcherOptions object. The returned Watcher is designed
// to emit events that happen to a Node, and optionally to its children.
Watcher(key string, opts *WatcherOptions) Watcher
}
type WatcherOptions struct {
// AfterIndex defines the index after-which the Watcher should
// start emitting events. For example, if a value of 5 is
// provided, the first event will have an index >= 6.
//
// Setting AfterIndex to 0 (default) means that the Watcher
// should start watching for events starting at the current
// index, whatever that may be.
AfterIndex uint64
// Recursive specifies whether or not the Watcher should emit
// events that occur in children of the given keyspace. If set
// to false (default), events will be limited to those that
// occur for the exact key.
Recursive bool
}
type CreateInOrderOptions struct {
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
}
type SetOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Set operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
//
// PrevValue is ignored if Dir=true
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Set operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// PrevExist specifies whether the Node must currently exist
// (PrevExist) or not (PrevNoExist). If the caller does not
// care about existence, set PrevExist to PrevIgnore, or simply
// leave it unset.
PrevExist PrevExistType
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
// Refresh set to true means a TTL value can be updated
// without firing a watch or changing the node value. A
// value must not be provided when refreshing a key.
Refresh bool
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
// If set, the response will only contain the current value when the request fails.
NoValueOnSuccess bool
}
type GetOptions struct {
// Recursive defines whether or not all children of the Node
// should be returned.
Recursive bool
// Sort instructs the server whether or not to sort the Nodes.
// If true, the Nodes are sorted alphabetically by key in
// ascending order (A to z). If false (default), the Nodes will
// not be sorted and the ordering used should not be considered
// predictable.
Sort bool
// Quorum specifies whether to get the latest committed value that
// has been applied in a quorum of members, which ensures external
// consistency (or linearizability).
Quorum bool
}
type DeleteOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Delete operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Delete operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// Recursive defines whether or not all children of the Node
// should be deleted. If set to true, all children of the Node
// identified by the given key will be deleted. If left unset
// or explicitly set to false, only a single Node will be
// deleted.
Recursive bool
// Dir specifies whether or not this Node should be removed as a directory.
Dir bool
}
type Watcher interface {
// Next blocks until an etcd event occurs, then returns a Response
// representing that event. The behavior of Next depends on the
// WatcherOptions used to construct the Watcher. Next is designed to
// be called repeatedly, each time blocking until a subsequent event
// is available.
//
// If the provided context is cancelled, Next will return a non-nil
// error. Any other failures encountered while waiting for the next
// event (connection issues, deserialization failures, etc) will
// also result in a non-nil error.
Next(context.Context) (*Response, error)
}
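A minimal watch-loop sketch, not part of the original file; the function name and recursive option are illustrative assumptions:
// exampleWatch blocks until the context is cancelled or an error occurs,
// printing every change under the given key.
func exampleWatch(ctx context.Context, kAPI KeysAPI, key string) error {
	w := kAPI.Watcher(key, &WatcherOptions{Recursive: true})
	for {
		resp, err := w.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
	}
}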
type Response struct {
// Action is the name of the operation that occurred. Possible values
// include get, set, delete, update, create, compareAndSwap,
// compareAndDelete and expire.
Action string `json:"action"`
// Node represents the state of the relevant etcd Node.
Node *Node `json:"node"`
// PrevNode represents the previous state of the Node. PrevNode is non-nil
// only if the Node existed before the action occurred and the action
// caused a change to the Node.
PrevNode *Node `json:"prevNode"`
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
}
type Node struct {
// Key represents the unique location of this Node (e.g. "/foo/bar").
Key string `json:"key"`
// Dir reports whether node describes a directory.
Dir bool `json:"dir,omitempty"`
// Value is the current data stored on this Node. If this Node
// is a directory, Value will be empty.
Value string `json:"value"`
// Nodes holds the children of this Node, only if this Node is a directory.
// This slice will be arbitrarily deep (children, grandchildren, great-
// grandchildren, etc.) if a recursive Get or Watch request was made.
Nodes Nodes `json:"nodes"`
// CreatedIndex is the etcd index at-which this Node was created.
CreatedIndex uint64 `json:"createdIndex"`
// ModifiedIndex is the etcd index at-which this Node was last modified.
ModifiedIndex uint64 `json:"modifiedIndex"`
// Expiration is the server side expiration time of the key.
Expiration *time.Time `json:"expiration,omitempty"`
// TTL is the time to live of the key, in seconds.
TTL int64 `json:"ttl,omitempty"`
}
func (n *Node) String() string {
return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
}
// TTLDuration returns the Node's TTL as a time.Duration object
func (n *Node) TTLDuration() time.Duration {
return time.Duration(n.TTL) * time.Second
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
type httpKeysAPI struct {
client httpClient
prefix string
}
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
act := &setAction{
Prefix: k.prefix,
Key: key,
Value: val,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.PrevExist = opts.PrevExist
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
act.NoValueOnSuccess = opts.NoValueOnSuccess
}
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
}
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
act := &createInOrderAction{
Prefix: k.prefix,
Dir: dir,
Value: val,
}
if opts != nil {
act.TTL = opts.TTL
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
}
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
act := &deleteAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.Dir = opts.Dir
act.Recursive = opts.Recursive
}
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
act := &getAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
act.Sorted = opts.Sort
act.Quorum = opts.Quorum
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
act := waitAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
if opts.AfterIndex > 0 {
act.WaitIndex = opts.AfterIndex + 1
}
}
return &httpWatcher{
client: k.client,
nextWait: act,
}
}
type httpWatcher struct {
client httpClient
nextWait waitAction
}
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
for {
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
if err != nil {
return nil, err
}
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
if err != nil {
if err == ErrEmptyBody {
continue
}
return nil, err
}
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
return resp, nil
}
}
// v2KeysURL forms a URL representing the location of a key.
// The endpoint argument represents the base URL of an etcd
// server. The prefix is the path needed to route from the
// provided endpoint's path to the root of the keys API
// (typically "/v2/keys").
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
// We concatenate all parts together manually. We cannot use
// path.Join because it does not preserve a trailing slash.
// We call CanonicalURLPath to further clean up the path.
if prefix != "" && prefix[0] != '/' {
prefix = "/" + prefix
}
if key != "" && key[0] != '/' {
key = "/" + key
}
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
return &ep
}
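// Illustrative example (not in the original file):
//   v2KeysURL(ep, "/v2/keys", "/foo/bar") -> <endpoint>/v2/keys/foo/bar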
type getAction struct {
Prefix string
Key string
Recursive bool
Sorted bool
Quorum bool
}
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, g.Prefix, g.Key)
params := u.Query()
params.Set("recursive", strconv.FormatBool(g.Recursive))
params.Set("sorted", strconv.FormatBool(g.Sorted))
params.Set("quorum", strconv.FormatBool(g.Quorum))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type waitAction struct {
Prefix string
Key string
WaitIndex uint64
Recursive bool
}
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, w.Prefix, w.Key)
params := u.Query()
params.Set("wait", "true")
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
params.Set("recursive", strconv.FormatBool(w.Recursive))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type setAction struct {
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
NoValueOnSuccess bool
}
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
form := url.Values{}
// we're either creating a directory or setting a key
if a.Dir {
params.Set("dir", strconv.FormatBool(a.Dir))
} else {
// These options are only valid for setting a key
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
form.Add("value", a.Value)
}
// Options which apply to both setting a key and creating a dir
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.PrevExist != PrevIgnore {
params.Set("prevExist", string(a.PrevExist))
}
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
if a.Refresh {
form.Add("refresh", "true")
}
if a.NoValueOnSuccess {
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("PUT", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type deleteAction struct {
Prefix string
Key string
PrevValue string
PrevIndex uint64
Dir bool
Recursive bool
}
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.Dir {
params.Set("dir", "true")
}
if a.Recursive {
params.Set("recursive", "true")
}
u.RawQuery = params.Encode()
req, _ := http.NewRequest("DELETE", u.String(), nil)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type createInOrderAction struct {
Prefix string
Dir string
Value string
TTL time.Duration
}
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Dir)
form := url.Values{}
form.Add("value", a.Value)
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("POST", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
switch code {
case http.StatusOK, http.StatusCreated:
if len(body) == 0 {
return nil, ErrEmptyBody
}
res, err = unmarshalSuccessfulKeysResponse(header, body)
default:
err = unmarshalFailedKeysResponse(body)
}
return
}
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
if err != nil {
return nil, ErrInvalidJSON
}
if header.Get("X-Etcd-Index") != "" {
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
if err != nil {
return nil, err
}
}
return &res, nil
}
func unmarshalFailedKeysResponse(body []byte) error {
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return ErrInvalidJSON
}
return etcdErr
}
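// Illustrative sketch, not part of the original file: the get/set actions
// above are normally driven through the public KeysAPI rather than built by
// hand. The endpoint, key, and values below are placeholders.
package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/client"
    "golang.org/x/net/context"
)

func main() {
    c, err := client.New(client.Config{
        Endpoints:               []string{"http://127.0.0.1:2379"},
        HeaderTimeoutPerRequest: time.Second,
    })
    if err != nil {
        panic(err)
    }
    kapi := client.NewKeysAPI(c)
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    // Create-only set with a TTL; this maps onto setAction's prevExist and ttl fields.
    if _, err := kapi.Set(ctx, "/foo", "bar", &client.SetOptions{
        PrevExist: client.PrevNoExist,
        TTL:       10 * time.Second,
    }); err != nil {
        panic(err)
    }
    // Quorum read; this maps onto getAction's quorum query parameter.
    resp, err := kapi.Get(ctx, "/foo", &client.GetOptions{Quorum: true})
    if err != nil {
        panic(err)
    }
    fmt.Println(resp.Node.Value)
}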


@ -1,304 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
"github.com/coreos/etcd/pkg/types"
)
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
// ID is the unique identifier of this Member.
ID string `json:"id"`
// Name is a human-readable, non-unique identifier of this Member.
Name string `json:"name"`
// PeerURLs represents the HTTP(S) endpoints this Member uses to
// participate in etcd's consensus protocol.
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}
type memberCreateOrUpdateRequest struct {
PeerURLs types.URLs
}
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
s := struct {
PeerURLs []string `json:"peerURLs"`
}{
PeerURLs: make([]string, len(m.PeerURLs)),
}
for i, u := range m.PeerURLs {
s.PeerURLs[i] = u.String()
}
return json.Marshal(&s)
}
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
// interact with etcd's membership API.
func NewMembersAPI(c Client) MembersAPI {
return &httpMembersAPI{
client: c,
}
}
type MembersAPI interface {
// List enumerates the current cluster membership.
List(ctx context.Context) ([]Member, error)
// Add instructs etcd to accept a new Member into the cluster.
Add(ctx context.Context, peerURL string) (*Member, error)
// Remove demotes an existing Member out of the cluster.
Remove(ctx context.Context, mID string) error
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets current leader of the cluster
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
client httpClient
}
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
req := &membersAPIActionList{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var mCollection memberCollection
if err := json.Unmarshal(body, &mCollection); err != nil {
return nil, err
}
return []Member(mCollection), nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
urls, err := types.NewURLs([]string{peerURL})
if err != nil {
return nil, err
}
req := &membersAPIActionAdd{peerURLs: urls}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
return nil, err
}
if resp.StatusCode != http.StatusCreated {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return nil, err
}
return nil, merr
}
var memb Member
if err := json.Unmarshal(body, &memb); err != nil {
return nil, err
}
return &memb, nil
}
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
urls, err := types.NewURLs(peerURLs)
if err != nil {
return err
}
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return err
}
return merr
}
return nil
}
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
req := &membersAPIActionRemove{memberID: memberID}
resp, _, err := m.client.Do(ctx, req)
if err != nil {
return err
}
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type membersAPIActionRemove struct {
memberID string
}
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, d.memberID)
req, _ := http.NewRequest("DELETE", u.String(), nil)
return req
}
type membersAPIActionAdd struct {
peerURLs types.URLs
}
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
type membersAPIActionUpdate struct {
memberID string
peerURLs types.URLs
}
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
u.Path = path.Join(u.Path, a.memberID)
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
func assertStatusCode(got int, want ...int) (err error) {
for _, w := range want {
if w == got {
return nil
}
}
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
return &ep
}
type membersError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e membersError) Error() string {
return e.Message
}
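// Illustrative sketch, not part of the original file: listing the cluster
// membership through the MembersAPI defined above. The endpoint is a
// placeholder.
package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/client"
    "golang.org/x/net/context"
)

func main() {
    c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
    if err != nil {
        panic(err)
    }
    mapi := client.NewMembersAPI(c)
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    members, err := mapi.List(ctx)
    if err != nil {
        panic(err)
    }
    for _, m := range members {
        fmt.Printf("%s %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
    }
}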


@ -1,65 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"net"
"net/url"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
)
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
// Discover looks up the etcd servers for the domain.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
var urls []*url.URL
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
return nil
}
errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return endpoints, nil
}
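// Illustrative sketch, not part of the original file: resolving client
// endpoints from DNS SRV records with the Discoverer above. The domain is a
// placeholder and must publish _etcd-client._tcp (or _etcd-client-ssl._tcp)
// records.
package main

import (
    "fmt"

    "github.com/coreos/etcd/client"
)

func main() {
    d := client.NewSRVDiscover()
    endpoints, err := d.Discover("example.com")
    if err != nil {
        panic(err)
    }
    fmt.Println(endpoints) // e.g. [https://etcd0.example.com:2379 ...]
}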


@ -1,53 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"regexp"
)
var (
roleNotFoundRegExp *regexp.Regexp
userNotFoundRegExp *regexp.Regexp
)
func init() {
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
}
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
return cErr.Code == ErrorCodeKeyNotFound
}
return false
}
// IsRoleNotFound returns true if the error means the role was not found in the v2 API.
func IsRoleNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return roleNotFoundRegExp.MatchString(ae.Message)
}
return false
}
// IsUserNotFound returns true if the error means the user was not found in the v2 API.
func IsUserNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return userNotFoundRegExp.MatchString(ae.Message)
}
return false
}
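// Illustrative sketch, not part of the original file: distinguishing a
// missing key from other failures with IsKeyNotFound. Assumes the client and
// golang.org/x/net/context imports; kapi is a client.KeysAPI created as in
// the earlier examples.
func getOrDefault(ctx context.Context, kapi client.KeysAPI, key, def string) (string, error) {
    resp, err := kapi.Get(ctx, key, nil)
    if err != nil {
        if client.IsKeyNotFound(err) {
            // ErrorCodeKeyNotFound: fall back to the default value.
            return def, nil
        }
        return "", err
    }
    return resp.Node.Value, nil
}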


@ -1,230 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"fmt"
"strings"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
Permission authpb.Permission
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
// UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets detailed information about a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role of a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information about a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
type auth struct {
c *Client
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func NewAuth(c *Client) Auth {
conn := c.ActiveConnection()
return &auth{
conn: c.ActiveConnection(),
remote: pb.NewAuthClient(conn),
c: c,
}
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}, nil
}


@ -1,214 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upEps holds the current endpoints that have an active connection
upEps map[string]struct{}
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
upc chan struct{}
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
notifyCh <- addrs
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upEps: make(map[string]struct{}),
upc: make(chan struct{}),
host2ep: getHost2ep(eps),
}
return sb
}
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) getEndpoint(host string) string {
b.mu.Lock()
defer b.mu.Unlock()
return b.host2ep[host]
}
func getHost2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
func (b *simpleBalancer) updateAddrs(eps []string) {
np := getHost2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.host2ep)
for k, v := range np {
if b.host2ep[k] != v {
match = false
break
}
}
if match {
// same endpoints, so no need to update address
return
}
b.host2ep = np
addrs := make([]grpc.Address, 0, len(eps))
for i := range eps {
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
}
b.addrs = addrs
b.notifyCh <- addrs
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it has called Close. We add this check
// at the application layer to guard against that; otherwise our
// simpleBalancer might panic, since b.upc is already closed.
if b.closed {
return func(err error) {}
}
if len(b.upEps) == 0 {
// notify waiting Get()s and pin first connected address
close(b.upc)
b.pinAddr = addr.Addr
}
b.upEps[addr.Addr] = struct{}{}
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
delete(b.upEps, addr.Addr)
if len(b.upEps) == 0 && b.pinAddr != "" {
b.upc = make(chan struct{})
} else if b.pinAddr == addr.Addr {
// choose new random up endpoint
for k := range b.upEps {
b.pinAddr = k
break
}
}
b.mu.Unlock()
}
}
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var addr string
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if addr == "" {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps > 0 {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
// In case gRPC calls Close twice. TODO: remove this check
// once we are sure that gRPC won't call Close twice.
if b.closed {
return nil
}
b.closed = true
close(b.notifyCh)
// terminate all waiting Get()s
b.pinAddr = ""
if len(b.upEps) == 0 {
close(b.upc)
}
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}


@ -1,367 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"strings"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
)
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
balancer *simpleBalancer
retryWrapper retryRpcFunc
ctx context.Context
cancel context.CancelFunc
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromConfigFile creates a new etcdv3 client from a configuration file.
func NewFromConfigFile(path string) (*Client, error) {
cfg, err := configFromFile(path)
if err != nil {
return nil, err
}
return New(*cfg)
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
return toErr(c.ctx, c.conn.Close())
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.cfg.Endpoints = eps
c.balancer.updateAddrs(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
mresp, err := c.MemberList(ctx)
if err != nil {
return err
}
var eps []string
for _, m := range mresp.Members {
eps = append(eps, m.ClientURLs...)
}
c.SetEndpoints(eps...)
return nil
}
func (c *Client) autoSync() {
if c.cfg.AutoSyncInterval == time.Duration(0) {
return
}
for {
select {
case <-c.ctx.Done():
return
case <-time.After(c.cfg.AutoSyncInterval):
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err)
}
}
}
}
type authTokenCredential struct {
token string
}
func (cred authTokenCredential) RequireTransportSecurity() bool {
return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
return map[string]string{
"token": cred.token,
}, nil
}
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
proto = "tcp"
host = endpoint
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
}
scheme = url.Scheme
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "http", "https":
case "unix":
proto = "unix"
default:
proto, host = "", ""
}
return
}
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
creds = c.creds
switch scheme {
case "unix":
case "http":
creds = nil
case "https":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
creds = nil
}
return
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
}
opts = append(opts, dopts...)
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
return net.DialTimeout(proto, host, t)
}
opts = append(opts, grpc.WithDialer(f))
creds := c.creds
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
creds = c.processCreds(scheme)
}
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
return opts
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
// use dial options without dopts to avoid reusing the client balancer
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
return nil, err
}
defer auth.close()
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
}
conn, err := grpc.Dial(host, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
}
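// Illustrative sketch, not part of the original file: requiring a leader for
// a read so the request fails fast during leader loss instead of blocking.
// Assumes the clientv3, context, and time imports; cli is a *clientv3.Client.
func leaderGet(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
    ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(context.Background()), 3*time.Second)
    defer cancel()
    return cli.Get(ctx, key)
}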
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds *credentials.TransportCredentials
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO())
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
client.balancer = newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
return nil, err
}
client.conn = conn
client.retryWrapper = client.newRetryWrapper()
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.readyc:
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
client.cancel()
conn.Close()
return nil, grpc.ErrClientConnTimeout
}
}
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
go client.autoSync()
return client, nil
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
code := grpc.Code(err)
// Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc?
return code != codes.Unavailable && code != codes.Internal
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
code := grpc.Code(err)
switch code {
case codes.DeadlineExceeded:
fallthrough
case codes.Canceled:
if ctx.Err() != nil {
err = ctx.Err()
}
case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing
}
return err
}


@ -1,102 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
Member pb.Member
MemberListResponse pb.MemberListResponse
MemberAddResponse pb.MemberAddResponse
MemberRemoveResponse pb.MemberRemoveResponse
MemberUpdateResponse pb.MemberUpdateResponse
)
type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}
type cluster struct {
remote pb.ClusterClient
}
func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r)
if err == nil {
return (*MemberAddResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r)
if err == nil {
return (*MemberRemoveResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
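// Illustrative sketch, not part of the original file: listing and adding
// members through the Cluster interface embedded in *clientv3.Client.
// Assumes the clientv3, context, fmt, and time imports; the peer URL is a
// placeholder.
func printAndGrowCluster(cli *clientv3.Client) error {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    resp, err := cli.MemberList(ctx)
    if err != nil {
        return err
    }
    for _, m := range resp.Members {
        fmt.Printf("%x %s %v\n", m.ID, m.Name, m.ClientURLs)
    }
    // MemberAdd only registers the peer URL; the new etcd process still has
    // to be started separately with a matching configuration.
    _, err = cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"})
    return err
}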


@ -1,53 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps slice CompactOption to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
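// Illustrative sketch, not part of the original file: compacting history up
// to the revision observed by a Get and waiting for the physical compaction.
// Assumes the clientv3 and context imports; cli is a *clientv3.Client.
func compactToCurrent(ctx context.Context, cli *clientv3.Client, key string) error {
    resp, err := cli.Get(ctx, key)
    if err != nil {
        return err
    }
    _, err = cli.Compact(ctx, resp.Header.Revision, clientv3.WithCompactPhysical())
    return err
}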


@ -1,91 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
type CompareResult int
const (
CompareVersion CompareTarget = iota
CompareCreated
CompareModified
CompareValue
)
type Cmp pb.Compare
func Compare(cmp Cmp, result string, v interface{}) Cmp {
var r pb.Compare_CompareResult
switch result {
case "=":
r = pb.Compare_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":
r = pb.Compare_LESS
default:
panic("Unknown result op")
}
cmp.Result = r
switch cmp.Target {
case pb.Compare_VALUE:
val, ok := v.(string)
if !ok {
panic("bad compare value")
}
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
case pb.Compare_VERSION:
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
case pb.Compare_CREATE:
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
default:
panic("Unknown compare type")
}
return cmp
}
func Value(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}
func Version(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}
func CreateRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}
func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
}
if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
}
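// Illustrative sketch, not part of the original file: using Compare inside a
// transaction to create a key only if it does not exist yet (a create
// revision of zero means "no such key"). Assumes the clientv3 and context
// imports; cli is a *clientv3.Client.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
    resp, err := cli.Txn(ctx).
        If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
        Then(clientv3.OpPut(key, val)).
        Else(clientv3.OpGet(key)).
        Commit()
    if err != nil {
        return false, err
    }
    return resp.Succeeded, nil
}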


@ -1,113 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
)
type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// AutoSyncInterval is the interval at which to update endpoints with the cluster's latest members.
// 0 disables auto-sync. By default auto-sync is disabled.
AutoSyncInterval time.Duration
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
type yamlConfig struct {
Endpoints []string `json:"endpoints"`
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
Certfile string `json:"cert-file"`
Keyfile string `json:"key-file"`
CAfile string `json:"ca-file"`
}
func configFromFile(fpath string) (*Config, error) {
b, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
yc := &yamlConfig{}
err = yaml.Unmarshal(b, yc)
if err != nil {
return nil, err
}
cfg := &Config{
Endpoints: yc.Endpoints,
AutoSyncInterval: yc.AutoSyncInterval,
DialTimeout: yc.DialTimeout,
}
if yc.InsecureTransport {
cfg.TLS = nil
return cfg, nil
}
var (
cert *tls.Certificate
cp *x509.CertPool
)
if yc.Certfile != "" && yc.Keyfile != "" {
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
if err != nil {
return nil, err
}
}
if yc.CAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
if err != nil {
return nil, err
}
}
tlscfg := &tls.Config{
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
RootCAs: cp,
}
if cert != nil {
tlscfg.Certificates = []tls.Certificate{*cert}
}
cfg.TLS = tlscfg
return cfg, nil
}
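// Illustrative sketch, not part of the original file: loading the client
// configuration from a YAML file whose keys mirror the yamlConfig struct
// tags above. The path and file contents are placeholders, for example:
//
//   endpoints:
//     - https://etcd0.example.com:2379
//   cert-file: /etc/etcd/client.crt
//   key-file: /etc/etcd/client.key
//   ca-file: /etc/etcd/ca.crt
//
func clientFromFile() (*clientv3.Client, error) {
    return clientv3.NewFromConfigFile("/etc/etcd/client-config.yaml")
}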


@ -1,64 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package clientv3 implements the official Go etcd client for v3.
//
// Create client using `clientv3.New`:
//
// cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second,
// })
// if err != nil {
// // handle error!
// }
// defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed, the
// connection will leak goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
// cancel()
// if err != nil {
// // handle error!
// }
// // use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// etcd client returns 2 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
//
// Here is the example code to handle client errors:
//
// resp, err := kvc.Put(ctx, "", "")
// if err != nil {
// if err == context.Canceled {
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
// // process (verr.Errors)
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
// }
//
package clientv3


@ -1,166 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
CompactResponse pb.CompactionResponse
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
)
type KV interface {
// Put puts a key-value pair into etcd.
// Note that key and value can be plain byte arrays, and string is
// an immutable representation of that byte array.
// To get a string of bytes, do string([]byte{0x10, 0x20}).
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
// Get retrieves keys.
// By default, Get will return the value for "key", if any.
// When passed WithRange(end), Get will return the keys in the range [key, end).
// When passed WithFromKey(), Get returns keys greater than or equal to key.
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
// if the required revision is compacted, the request will fail with ErrCompacted.
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
// When passed WithSort(), the keys will be sorted.
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
// Delete deletes a key, or optionally using WithRange(end), [key, end).
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction.
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
// execute them. Get/Put/Delete, on the other hand, are best suited
// for when the operation should be issued at the time of declaration.
Do(ctx context.Context, op Op) (OpResponse, error)
// Txn creates a transaction.
Txn(ctx context.Context) Txn
}
type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
type kv struct {
remote pb.KVClient
}
func NewKV(c *Client) KV {
return &kv{remote: RetryKVClient(c)}
}
func NewKVFromKVClient(remote pb.KVClient) KV {
return &kv{remote: remote}
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, toErr(ctx, err)
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, toErr(ctx, err)
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, toErr(ctx, err)
}
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*CompactResponse)(resp), err
}
func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{
kv: kv,
ctx: ctx,
}
}
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
resp, err = kv.remote.Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
resp, err = kv.remote.DeleteRange(ctx, r)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
return OpResponse{}, err
}
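// Illustrative sketch, not part of the original file: basic KV usage through
// the interface above. The endpoint, keys, and values are placeholders.
package main

import (
    "fmt"
    "time"

    "github.com/coreos/etcd/clientv3"
    "golang.org/x/net/context"
)

func main() {
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"localhost:2379"},
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        panic(err)
    }
    defer cli.Close()
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    if _, err := cli.Put(ctx, "/app/host", "10.0.0.1"); err != nil {
        panic(err)
    }
    // Range read over every key under the /app/ prefix.
    resp, err := cli.Get(ctx, "/app/", clientv3.WithPrefix())
    if err != nil {
        panic(err)
    }
    for _, kv := range resp.Kvs {
        fmt.Printf("%s = %s\n", kv.Key, kv.Value)
    }
}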


@ -1,504 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader
ID LeaseID `json:"id"`
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
TTL int64 `json:"ttl"`
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
GrantedTTL int64 `json:"granted-ttl"`
// Keys is the list of keys attached to this lease.
Keys [][]byte `json:"keys"`
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
// a small buffer to store unsent lease responses.
leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
)
type Lease interface {
// Grant creates a new lease.
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
// Revoke revokes the given lease.
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
// TimeToLive retrieves the lease information of the given lease ID.
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most cases, KeepAlive
// should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
// Close releases all resources Lease keeps for efficient communication
// with the etcd server.
Close() error
}
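// Illustrative sketch, not part of the original file: attaching a key to a
// lease and keeping it alive for the lifetime of ctx, a common
// service-registration pattern. Assumes the clientv3 and context imports;
// the key, value, and TTL are placeholders.
func registerWithLease(ctx context.Context, cli *clientv3.Client, key, val string) error {
    grant, err := cli.Grant(ctx, 10) // 10-second TTL
    if err != nil {
        return err
    }
    if _, err := cli.Put(ctx, key, val, clientv3.WithLease(grant.ID)); err != nil {
        return err
    }
    // KeepAlive renews the lease until ctx is canceled or the lease expires.
    ch, err := cli.KeepAlive(ctx, grant.ID)
    if err != nil {
        return err
    }
    go func() {
        for range ch {
            // drain keepalive responses
        }
    }()
    return nil
}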
type lessor struct {
mu sync.Mutex // guards all fields
// donec is closed when recvKeepAliveLoop stops
donec chan struct{}
remote pb.LeaseClient
stream pb.Lease_LeaseKeepAliveClient
streamCancel context.CancelFunc
stopCtx context.Context
stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context
// deadline is the time the keep alive channels close if no response
deadline time.Time
// nextKeepAlive is when to send the next keep alive message
nextKeepAlive time.Time
// donec is closed on lease revoke, expiration, or cancel.
donec chan struct{}
}
func NewLease(c *Client) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: RetryLeaseClient(c),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop()
go l.deadlineLoop()
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(cctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(cctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(cctx, r)
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(cctx, err)
}
}
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
l.mu.Lock()
ka, ok := l.keepAlives[id]
if !ok {
// create fresh keep alive
ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now().Add(l.firstKeepAliveTimeout),
nextKeepAlive: time.Now(),
donec: make(chan struct{}),
}
l.keepAlives[id] = ka
} else {
// add channel and context to existing keep alive
ka.ctxs = append(ka.ctxs, ctx)
ka.chs = append(ka.chs, ch)
}
l.mu.Unlock()
go l.keepAliveCtxCloser(id, ctx, ka.donec)
return ch, nil
}
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
resp, err := l.keepAliveOnce(cctx, id)
if err == nil {
if resp.TTL == 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Close() error {
l.stopCancel()
<-l.donec
return nil
}
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
select {
case <-donec:
return
case <-l.donec:
return
case <-ctx.Done():
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
if !ok {
return
}
// close channel and remove context if still associated with keep alive
for i, c := range ka.ctxs {
if c == ctx {
close(ka.chs[i])
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
break
}
}
// remove the keep alive entry if there are no more listeners
if len(ka.chs) == 0 {
delete(l.keepAlives, id)
}
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
return nil, toErr(ctx, err)
}
resp, rerr := stream.Recv()
if rerr != nil {
return nil, toErr(ctx, rerr)
}
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
return karesp, nil
}
func (l *lessor) recvKeepAliveLoop() {
defer func() {
l.mu.Lock()
close(l.donec)
for _, ka := range l.keepAlives {
ka.Close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
}()
stream, serr := l.resetRecv()
for serr == nil {
resp, err := stream.Recv()
if err != nil {
if isHaltErr(l.stopCtx, err) {
return
}
stream, serr = l.resetRecv()
continue
}
l.recvKeepAlive(resp)
}
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
if err := l.newStream(); err != nil {
return nil, err
}
stream := l.getKeepAliveStream()
go l.sendKeepAliveLoop(stream)
return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[karesp.ID]
if !ok {
return
}
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
return
}
// send update to all channels
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs {
select {
case ch <- karesp:
ka.nextKeepAlive = nextKeepAlive
default:
}
}
}
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
for {
select {
case <-time.After(time.Second):
case <-l.donec:
return
}
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
select {
case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
var tosend []LeaseID
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.nextKeepAlive.Before(now) {
tosend = append(tosend, id)
}
}
l.mu.Unlock()
for _, id := range tosend {
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
if err := stream.Send(r); err != nil {
// TODO do something with this error?
return
}
}
}
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.stream
}
func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err != nil {
cancel()
return toErr(sctx, err)
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
return nil
}
func (ka *keepAlive) Close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
}
}
// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done
// should be closed when the work is finished. When done fires, cancelWhenStop will release
// its internal resource.
func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} {
done := make(chan struct{}, 1)
go func() {
select {
case <-stopc:
case <-done:
}
cancel()
}()
return done
}
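For orientation, here is a minimal sketch of how this lessor API is typically driven from calling code. It assumes the clientv3.New/clientv3.Config constructor and the KV Put method from client.go and kv.go, which are not part of this diff; the endpoint, key name, and TTL are illustrative only.
package main
import (
	"fmt"
	"log"
	"time"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
func main() {
	// clientv3.New/Config come from client.go, which is not shown in this diff.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	lease := clientv3.NewLease(cli)
	defer lease.Close()
	// grant a 10-second lease and attach a key to it (Put is promoted from the KV interface)
	grant, err := lease.Grant(context.Background(), 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.Background(), "service/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}
	// KeepAlive refreshes the lease in the background; responses arrive on the channel,
	// which closes when the lease expires, is revoked, or the lessor is closed.
	ch, err := lease.KeepAlive(context.Background(), grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	for ka := range ch {
		fmt.Println("lease refreshed, TTL:", ka.TTL)
	}
}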


@ -1,82 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"io/ioutil"
"log"
"sync"
"google.golang.org/grpc/grpclog"
)
// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger
var (
logger settableLogger
)
type settableLogger struct {
l grpclog.Logger
mu sync.RWMutex
}
func init() {
// disable client side logs by default
logger.mu.Lock()
logger.l = log.New(ioutil.Discard, "", 0)
// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
grpclog.SetLogger(&logger)
logger.mu.Unlock()
}
// SetLogger sets client-side Logger. By default, logs are disabled.
func SetLogger(l Logger) {
logger.set(l)
}
// GetLogger returns the current logger.
func GetLogger() Logger {
return logger.get()
}
func (s *settableLogger) set(l Logger) {
s.mu.Lock()
logger.l = l
s.mu.Unlock()
}
func (s *settableLogger) get() Logger {
s.mu.RLock()
l := logger.l
s.mu.RUnlock()
return l
}
// implement the grpclog.Logger interface
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
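Since logs are discarded by default, a caller that wants client-side gRPC logging has to install its own Logger. A minimal sketch, assuming only the SetLogger entry point above; the prefix string is illustrative.
package main
import (
	"log"
	"os"
	"github.com/coreos/etcd/clientv3"
)
func main() {
	// *log.Logger provides Fatal/Fatalf/Fatalln/Print/Printf/Println,
	// so it satisfies the grpclog-based clientv3.Logger interface.
	clientv3.SetLogger(log.New(os.Stderr, "clientv3: ", log.LstdFlags))
}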


@ -1,164 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
DefragmentResponse pb.DefragmentResponse
AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse
)
type Maintenance interface {
// AlarmList gets all active alarms.
AlarmList(ctx context.Context) (*AlarmResponse, error)
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments the storage backend of the etcd member with the given endpoint.
// Defragment is only needed after deleting a large number of keys, when the freed
// space should be reclaimed.
// Defragment is an expensive operation; avoid defragmenting multiple members
// at the same time.
// To defragment multiple members in the cluster, call Defragment multiple times
// with different endpoints.
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
Snapshot(ctx context.Context) (io.ReadCloser, error)
}
type maintenance struct {
c *Client
remote pb.MaintenanceClient
}
func NewMaintenance(c *Client) Maintenance {
return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_GET,
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_DEACTIVATE,
MemberID: am.MemberID,
Alarm: am.Alarm,
}
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
return nil, toErr(ctx, err)
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
return nil, toErr(ctx, derr)
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*DefragmentResponse)(resp), nil
}
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*StatusResponse)(resp), nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
pw.CloseWithError(err)
return
}
if resp == nil && err == nil {
break
}
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return pr, nil
}
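A sketch of driving the Maintenance API per endpoint, inspecting each member and then defragmenting it one at a time (concurrent defragmentation is discouraged above). The package name, function name, client, and endpoint list are assumptions for illustration; Status and Defragment are the interface methods defined above.
package example
import (
	"fmt"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
// statusAndDefrag prints the status of each endpoint, then defragments it.
func statusAndDefrag(cli *clientv3.Client, endpoints []string) error {
	m := clientv3.NewMaintenance(cli)
	for _, ep := range endpoints {
		st, err := m.Status(context.Background(), ep)
		if err != nil {
			return err
		}
		fmt.Printf("%s: version=%s dbSize=%d\n", ep, st.Version, st.DbSize)
		// defragment one member at a time; it is an expensive operation
		if _, err := m.Defragment(context.Background(), ep); err != nil {
			return err
		}
	}
	return nil
}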


@ -1,389 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
type opType int
const (
// A default Op has opType 0, which is invalid.
tRange opType = iota + 1
tPut
tDeleteRange
)
var (
noPrefixEnd = []byte{0}
)
// Op represents an Operation that kv can execute.
type Op struct {
t opType
key []byte
end []byte
// for range
limit int64
sort *SortOption
serializable bool
keysOnly bool
countOnly bool
minModRev int64
maxModRev int64
minCreateRev int64
maxCreateRev int64
// for range, watch
rev int64
// for watch, put, delete
prevKV bool
// progressNotify is for progress updates.
progressNotify bool
// createdNotify is for created event
createdNotify bool
// filters for watchers
filterPut bool
filterDelete bool
// for put
val []byte
leaseID LeaseID
}
func (op Op) toRangeRequest() *pb.RangeRequest {
if op.t != tRange {
panic("op.t != tRange")
}
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
MinModRevision: op.minModRev,
MaxModRevision: op.maxModRev,
MinCreateRevision: op.minCreateRev,
MaxCreateRevision: op.maxCreateRev,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
return r
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
default:
panic("Unknown Op")
}
}
func (op Op) isWrite() bool {
return op.t != tRange
}
func OpGet(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
return ret
}
func OpDelete(key string, opts ...OpOption) Op {
ret := Op{t: tDeleteRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in delete")
case ret.limit != 0:
panic("unexpected limit in delete")
case ret.rev != 0:
panic("unexpected revision in delete")
case ret.sort != nil:
panic("unexpected sort in delete")
case ret.serializable:
panic("unexpected serializable in delete")
case ret.countOnly:
panic("unexpected countOnly in delete")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in delete")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in delete")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in delete")
case ret.createdNotify:
panic("unexpected createdNotify in delete")
}
return ret
}
func OpPut(key, val string, opts ...OpOption) Op {
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
ret.applyOpts(opts)
switch {
case ret.end != nil:
panic("unexpected range in put")
case ret.limit != 0:
panic("unexpected limit in put")
case ret.rev != 0:
panic("unexpected revision in put")
case ret.sort != nil:
panic("unexpected sort in put")
case ret.serializable:
panic("unexpected serializable in put")
case ret.countOnly:
panic("unexpected countOnly in put")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in put")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in put")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in put")
case ret.createdNotify:
panic("unexpected createdNotify in put")
}
return ret
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in watch")
case ret.limit != 0:
panic("unexpected limit in watch")
case ret.sort != nil:
panic("unexpected sort in watch")
case ret.serializable:
panic("unexpected serializable in watch")
case ret.countOnly:
panic("unexpected countOnly in watch")
case ret.minModRev != 0, ret.maxModRev != 0:
panic("unexpected mod revision filter in watch")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
panic("unexpected create revision filter in watch")
}
return ret
}
func (op *Op) applyOpts(opts []OpOption) {
for _, opt := range opts {
opt(op)
}
}
// OpOption configures Operations like Get, Put, Delete.
type OpOption func(*Op)
// WithLease attaches a lease ID to a key in 'Put' request.
func WithLease(leaseID LeaseID) OpOption {
return func(op *Op) { op.leaseID = leaseID }
}
// WithLimit limits the number of results to return from 'Get' request.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for 'Get' request.
// Or the start revision of 'Watch' request.
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
// WithSort specifies the ordering in 'Get' request. It requires
// 'WithRange' and/or 'WithPrefix' to be specified too.
// 'target' specifies the target to sort by: key, version, revisions, value.
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
return func(op *Op) {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// client should ignore SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}
}
}
// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
func GetPrefixRangeEnd(prefix string) string {
return string(getPrefix([]byte(prefix)))
}
func getPrefix(key []byte) []byte {
end := make([]byte, len(key))
copy(end, key)
for i := len(end) - 1; i >= 0; i-- {
if end[i] < 0xff {
end[i] = end[i] + 1
end = end[:i+1]
return end
}
}
// next prefix does not exist (e.g., 0xffff);
// default to WithFromKey policy
return noPrefixEnd
}
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
op.end = getPrefix(op.key)
}
}
// WithRange specifies the range of 'Get' or 'Delete' requests.
// For example, 'Get' requests with 'WithRange(end)' returns
// the keys in the range [key, end).
func WithRange(endKey string) OpOption {
return func(op *Op) { op.end = []byte(endKey) }
}
// WithFromKey specifies the range of 'Get' or 'Delete' requests
// to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes the 'Get' request serializable. By default,
// it is linearizable. Serializable requests are better when lower latency
// is required.
func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
// values will be omitted.
func WithKeysOnly() OpOption {
return func(op *Op) { op.keysOnly = true }
}
// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
return func(op *Op) { op.countOnly = true }
}
// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
// WithLastKey gets the lexically last key in the request range.
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
// WithFirstRev gets the key with the oldest modification revision in the request range.
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
// WithLastRev gets the key with the latest modification revision in the request range.
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
// withTop gets the first key over the get's prefix given a sort order
func withTop(target SortTarget, order SortOrder) []OpOption {
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
// WithProgressNotify makes watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
return func(op *Op) {
op.progressNotify = true
}
}
// WithCreatedNotify makes the watch server send the created event.
func WithCreatedNotify() OpOption {
return func(op *Op) {
op.createdNotify = true
}
}
// WithFilterPut discards PUT events from the watcher.
func WithFilterPut() OpOption {
return func(op *Op) { op.filterPut = true }
}
// WithFilterDelete discards DELETE events from the watcher.
func WithFilterDelete() OpOption {
return func(op *Op) { op.filterDelete = true }
}
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
// nothing will be returned.
func WithPrevKV() OpOption {
return func(op *Op) {
op.prevKV = true
}
}
// LeaseOp represents an Operation that lease can execute.
type LeaseOp struct {
id LeaseID
// for TimeToLive
attachedKeys bool
}
// LeaseOption configures lease operations.
type LeaseOption func(*LeaseOp)
func (op *LeaseOp) applyOpts(opts []LeaseOption) {
for _, opt := range opts {
opt(op)
}
}
// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true }
}
func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
ret := &LeaseOp{id: id}
ret.applyOpts(opts)
return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
}
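A short sketch of how these OpOptions compose on a read, assuming a constructed *clientv3.Client (the Get method is promoted from the KV interface in kv.go, which is not shown here); the package name, function name, and key prefix are illustrative. WithPrefix, WithSort, and WithLimit are the options defined above.
package example
import (
	"fmt"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
// newestJobs reads at most n keys under the "jobs/" prefix,
// newest modification first, values included.
func newestJobs(cli *clientv3.Client, n int64) error {
	resp, err := cli.Get(context.Background(), "jobs/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
		clientv3.WithLimit(n))
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s (mod rev %d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
	return nil
}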


@ -1,253 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc)
func (c *Client) newRetryWrapper() retryRpcFunc {
return func(rpcCtx context.Context, f rpcFunc) {
for {
err := f(rpcCtx)
if err == nil {
return
}
// only retry if unavailable
if grpc.Code(err) != codes.Unavailable {
return
}
// always stop retry on etcd errors
eErr := rpctypes.Error(err)
if _, ok := eErr.(rpctypes.EtcdError); ok {
return
}
select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
case <-c.ctx.Done():
return
}
}
}
}
type retryKVClient struct {
pb.KVClient
retryf retryRpcFunc
}
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
}
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
return err
})
return resp, err
}
type retryLeaseClient struct {
pb.LeaseClient
retryf retryRpcFunc
}
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
return err
})
return resp, err
}
type retryClusterClient struct {
pb.ClusterClient
retryf retryRpcFunc
}
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
return err
})
return resp, err
}
type retryAuthClient struct {
pb.AuthClient
retryf retryRpcFunc
}
// RetryAuthClient implements an AuthClient that uses the client's FailFast retry policy.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
return err
})
return resp, err
}
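The wrappers above all follow the same shape: run the RPC, return on success or on any error other than Unavailable, otherwise wait and try again. A standalone sketch of that shape, using a timed backoff in place of the balancer's ConnectNotify (which is internal to the client and not available here); the package and function names are illustrative.
package example
import (
	"time"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)
// retryUnavailable keeps calling f while it fails with codes.Unavailable,
// giving up when the context is done or a different error code is returned.
func retryUnavailable(ctx context.Context, f func(context.Context) error) error {
	for {
		err := f(ctx)
		if err == nil || grpc.Code(err) != codes.Unavailable {
			return err
		}
		select {
		case <-time.After(100 * time.Millisecond): // crude stand-in for ConnectNotify
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}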


@ -1,37 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
type SortTarget int
type SortOrder int
const (
SortNone SortOrder = iota
SortAscend
SortDescend
)
const (
SortByKey SortTarget = iota
SortByVersion
SortByCreateRevision
SortByModRevision
SortByValue
)
type SortOption struct {
Target SortTarget
Order SortOrder
}


@ -1,166 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Otherwise the operations
// passed into Else() will be executed.
If(cs ...Cmp) Txn
// Then takes a list of operations. The Ops list will be executed, if the
// comparisons passed in If() succeed.
Then(ops ...Op) Txn
// Else takes a list of operations. The Ops list will be executed, if the
// comparisons passed in If() fail.
Else(ops ...Op) Txn
// Commit tries to commit the transaction.
Commit() (*TxnResponse, error)
// TODO: add a Do for shortcut the txn without any condition?
}
type txn struct {
kv *kv
ctx context.Context
mu sync.Mutex
cif bool
cthen bool
celse bool
isWrite bool
cmps []*pb.Compare
sus []*pb.RequestOp
fas []*pb.RequestOp
}
func (txn *txn) If(cs ...Cmp) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cif {
panic("cannot call If twice!")
}
if txn.cthen {
panic("cannot call If after Then!")
}
if txn.celse {
panic("cannot call If after Else!")
}
txn.cif = true
for i := range cs {
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
}
return txn
}
func (txn *txn) Then(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cthen {
panic("cannot call Then twice!")
}
if txn.celse {
panic("cannot call Then after Else!")
}
txn.cthen = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestOp())
}
return txn
}
func (txn *txn) Else(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.celse {
panic("cannot call Else twice!")
}
txn.celse = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestOp())
}
return txn
}
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
for {
resp, err := txn.commit()
if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
var opts []grpc.CallOption
if !txn.isWrite {
opts = []grpc.CallOption{grpc.FailFast(false)}
}
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
if err != nil {
return nil, err
}
return (*TxnResponse)(resp), nil
}
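Filling in the interface's own doc-comment example with concrete clientv3 helpers gives a minimal sketch like the one below. Compare, Value, and Version come from compare.go, which is not part of this excerpt, and the Txn method is promoted from the KV interface; the keys and values are illustrative.
package example
import (
	"fmt"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
// txnExample commits the Then branch only if both comparisons on "k1" hold.
func txnExample(cli *clientv3.Client) error {
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("k1"), ">", "v1"),
			clientv3.Compare(clientv3.Version("k1"), "=", 2)).
		Then(clientv3.OpPut("k2", "v2"), clientv3.OpPut("k3", "v3")).
		Else(clientv3.OpPut("k4", "v4"), clientv3.OpPut("k5", "v5")).
		Commit()
	if err != nil {
		return err
	}
	fmt.Println("comparisons succeeded:", resp.Succeeded)
	return nil
}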


@ -1,706 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"fmt"
"sync"
"time"
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
EventTypeDelete = mvccpb.DELETE
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
)
type Event mvccpb.Event
type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
Close() error
}
type WatchResponse struct {
Header pb.ResponseHeader
Events []*Event
// CompactRevision is the minimum revision the watcher may receive.
CompactRevision int64
// Canceled is used to indicate watch failure.
// If the watch failed and the stream was about to close, before the channel is closed,
// the channel sends a final response that has Canceled set to true with a non-nil Err().
Canceled bool
// Created is used to indicate the creation of the watcher.
Created bool
closeErr error
}
// IsCreate returns true if the event tells that the key is newly created.
func (e *Event) IsCreate() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}
// IsModify returns true if the event tells that a new value is put on existing key.
func (e *Event) IsModify() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
}
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
switch {
case wr.closeErr != nil:
return v3rpc.Error(wr.closeErr)
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
case wr.Canceled:
return v3rpc.ErrFutureRev
}
return nil
}
// IsProgressNotify returns true if the WatchResponse is progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
}
// watcher implements the Watcher interface
type watcher struct {
remote pb.WatchClient
// mu protects the grpc streams map
mu sync.RWMutex
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
}
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient
// ctx controls internal remote.Watch requests
ctx context.Context
// ctxKey is the key used when looking up this stream's context
ctxKey string
cancel context.CancelFunc
// substreams holds all active watchers on this grpc stream
substreams map[int64]*watcherStream
// resuming holds all resuming watchers on this grpc stream
resuming []*watcherStream
// reqc sends a watch request from Watch() to the main goroutine
reqc chan *watchRequest
// respc receives data from the watch client
respc chan *pb.WatchResponse
// stopc is sent to the main goroutine to stop all processing
stopc chan struct{}
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconn logic
errc chan error
// closingc gets the watcherStream of closing watchers
closingc chan *watcherStream
// resumec closes to signal that all substreams should begin resuming
resumec chan struct{}
// closeErr is the error that closed the watch stream
closeErr error
}
// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
ctx context.Context
key string
end string
rev int64
// send created notification event if this field is true
createdNotify bool
// progressNotify is for progress updates
progressNotify bool
// filters is the list of events to filter out
filters []pb.WatchCreateRequest_FilterType
// get the previous key-value pair before the event happens
prevKV bool
// retc receives a chan WatchResponse once the watcher is established
retc chan chan WatchResponse
}
// watcherStream represents a registered watcher
type watcherStream struct {
// initReq is the request that initiated this watcher stream
initReq watchRequest
// outc publishes watch responses to subscriber
outc chan WatchResponse
// recvc buffers watch responses before publishing
recvc chan *WatchResponse
// donec closes when the watcherStream goroutine stops.
donec chan struct{}
// closing is set to true when stream should be scheduled to shutdown.
closing bool
// id is the registered watch id on the grpc stream
id int64
// buf holds all events received from etcd but not yet consumed by the client
buf []*WatchResponse
}
func NewWatcher(c *Client) Watcher {
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
}
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
return &watcher{
remote: wc,
streams: make(map[string]*watchGrpcStream),
}
}
// never closes
var valCtxCh = make(chan struct{})
var zeroTime = time.Unix(0, 0)
// ctx with only the values; never Done
type valCtx struct{ context.Context }
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx),
cancel: cancel,
substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
stopc: make(chan struct{}),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
}
go wgs.run()
return wgs
}
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
ow := opWatch(key, opts...)
var filters []pb.WatchCreateRequest_FilterType
if ow.filterPut {
filters = append(filters, pb.WatchCreateRequest_NOPUT)
}
if ow.filterDelete {
filters = append(filters, pb.WatchCreateRequest_NODELETE)
}
wr := &watchRequest{
ctx: ctx,
createdNotify: ow.createdNotify,
key: string(ow.key),
end: string(ow.end),
rev: ow.rev,
progressNotify: ow.progressNotify,
filters: filters,
prevKV: ow.prevKV,
retc: make(chan chan WatchResponse, 1),
}
ok := false
ctxKey := fmt.Sprintf("%v", ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
if w.streams == nil {
// closed
w.mu.Unlock()
ch := make(chan WatchResponse)
close(ch)
return ch
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
// couldn't create channel; return closed channel
closeCh := make(chan WatchResponse, 1)
// submit request
select {
case reqc <- wr:
ok = true
case <-wr.ctx.Done():
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
// receive channel
if ok {
select {
case ret := <-wr.retc:
return ret
case <-ctx.Done():
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
}
close(closeCh)
return closeCh
}
func (w *watcher) Close() (err error) {
w.mu.Lock()
streams := w.streams
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
err = werr
}
}
return err
}
func (w *watchGrpcStream) Close() (err error) {
close(w.stopc)
<-w.donec
select {
case err = <-w.errc:
default:
}
return toErr(w.ctx, err)
}
func (w *watcher) closeStream(wgs *watchGrpcStream) {
w.mu.Lock()
close(wgs.donec)
wgs.cancel()
if w.streams != nil {
delete(w.streams, wgs.ctxKey)
}
w.mu.Unlock()
}
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
if resp.WatchId == -1 {
// failed; no channel
close(ws.recvc)
return
}
ws.id = resp.WatchId
w.substreams[ws.id] = ws
}
func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
select {
case ws.outc <- *resp:
case <-ws.initReq.ctx.Done():
case <-time.After(closeSendErrTimeout):
}
close(ws.outc)
}
func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
// send channel response in case stream was never established
select {
case ws.initReq.retc <- ws.outc:
default:
}
// close subscriber's channel
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
} else {
close(ws.outc)
}
if ws.id != -1 {
delete(w.substreams, ws.id)
return
}
for i := range w.resuming {
if w.resuming[i] == ws {
w.resuming[i] = nil
return
}
}
}
// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
// substreams marked to close but goroutine still running; needed for
// avoiding double-closing recvc on grpc stream teardown
closing := make(map[*watcherStream]struct{})
defer func() {
w.closeErr = closeErr
// shutdown substreams and resuming substreams
for _, ws := range w.substreams {
if _, ok := closing[ws]; !ok {
close(ws.recvc)
}
}
for _, ws := range w.resuming {
if _, ok := closing[ws]; ws != nil && !ok {
close(ws.recvc)
}
}
w.joinSubstreams()
for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- {
w.closeSubstream(<-w.closingc)
}
w.owner.closeStream(w)
}()
// start a stream with the etcd grpc server
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
cancelSet := make(map[int64]struct{})
for {
select {
// Watch() requested
case wreq := <-w.reqc:
outc := make(chan WatchResponse, 1)
ws := &watcherStream{
initReq: *wreq,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
}
ws.donec = make(chan struct{})
go w.serveSubstream(ws, w.resumec)
// queue up for watcher creation/resume
w.resuming = append(w.resuming, ws)
if len(w.resuming) == 1 {
// head of resume queue, can register a new watcher
wc.Send(ws.initReq.toPB())
}
// New events from the watch client
case pbresp := <-w.respc:
switch {
case pbresp.Created:
// response to head of queue creation
if ws := w.resuming[0]; ws != nil {
w.addSubstream(pbresp, ws)
w.dispatchEvent(pbresp)
w.resuming[0] = nil
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
case pbresp.Canceled:
delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc
close(ws.recvc)
closing[ws] = struct{}{}
}
default:
// dispatch to appropriate watch stream
if ok := w.dispatchEvent(pbresp); ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed to recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
cancelSet = make(map[int64]struct{})
case <-w.stopc:
return
case ws := <-w.closingc:
w.closeSubstream(ws)
delete(closing, ws)
if len(w.substreams)+len(w.resuming) == 0 {
// no more watchers on this stream, shutdown
return
}
}
}
}
// nextResume chooses the next resuming to register with the grpc stream. Abandoned
// streams are marked as nil in the queue since the head must wait for its inflight registration.
func (w *watchGrpcStream) nextResume() *watcherStream {
for len(w.resuming) != 0 {
if w.resuming[0] != nil {
return w.resuming[0]
}
w.resuming = w.resuming[1:len(w.resuming)]
}
return nil
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
ws, ok := w.substreams[pbresp.WatchId]
if !ok {
return false
}
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
}
wr := &WatchResponse{
Header: *pbresp.Header,
Events: events,
CompactRevision: pbresp.CompactRevision,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
}
select {
case ws.recvc <- wr:
case <-ws.donec:
return false
}
return true
}
// serveWatchClient forwards messages from the grpc stream to run()
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
select {
case w.errc <- err:
case <-w.donec:
}
return
}
select {
case w.respc <- resp:
case <-w.donec:
return
}
}
}
// serveSubstream forwards watch responses from run() to the subscriber
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
if ws.closing {
panic("created substream goroutine but substream is closing")
}
// nextRev is the minimum expected next revision
nextRev := ws.initReq.rev
resuming := false
defer func() {
if !resuming {
ws.closing = true
}
close(ws.donec)
if !resuming {
w.closingc <- ws
}
}()
emptyWr := &WatchResponse{}
for {
curWr := emptyWr
outc := ws.outc
if len(ws.buf) > 0 && ws.buf[0].Created {
select {
case ws.initReq.retc <- ws.outc:
// send first creation event and only if requested
if !ws.initReq.createdNotify {
ws.buf = ws.buf[1:]
}
default:
}
}
if len(ws.buf) > 0 {
curWr = ws.buf[0]
} else {
outc = nil
}
select {
case outc <- *curWr:
if ws.buf[0].Err() != nil {
return
}
ws.buf[0] = nil
ws.buf = ws.buf[1:]
case wr, ok := <-ws.recvc:
if !ok {
// shutdown from closeSubstream
return
}
// TODO pause channel if buffer gets too large
ws.buf = append(ws.buf, wr)
nextRev = wr.Header.Revision
if len(wr.Events) > 0 {
nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
}
ws.initReq.rev = nextRev
case <-ws.initReq.ctx.Done():
return
case <-resumec:
resuming = true
return
}
}
// lazily send cancel message if events on missing id
}
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
// connect to grpc stream
wc, err := w.openWatchClient()
if err != nil {
return nil, v3rpc.Error(err)
}
// mark all substreams as resuming
if len(w.substreams)+len(w.resuming) > 0 {
close(w.resumec)
w.resumec = make(chan struct{})
w.joinSubstreams()
for _, ws := range w.substreams {
ws.id = -1
w.resuming = append(w.resuming, ws)
}
for _, ws := range w.resuming {
if ws == nil || ws.closing {
continue
}
ws.donec = make(chan struct{})
go w.serveSubstream(ws, w.resumec)
}
}
w.substreams = make(map[int64]*watcherStream)
// receive data from new grpc stream
go w.serveWatchClient(wc)
return wc, nil
}
// joinSubstreams waits for all substream goroutines to complete
func (w *watchGrpcStream) joinSubstreams() {
for _, ws := range w.substreams {
<-ws.donec
}
for _, ws := range w.resuming {
if ws != nil {
<-ws.donec
}
}
}
// openWatchClient retries opening a watch client until it succeeds, the stream is stopped, or a halting error occurs
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
select {
case <-w.stopc:
if err == nil {
return nil, context.Canceled
}
return nil, err
default:
}
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
break
}
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
}
return ws, nil
}
// toPB converts an internal watch request structure to its protobuf message
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
Key: []byte(wr.key),
RangeEnd: []byte(wr.end),
ProgressNotify: wr.progressNotify,
Filters: wr.filters,
PrevKv: wr.prevKV,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
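To tie the pieces together, a minimal sketch of consuming a watch channel, assuming a constructed *clientv3.Client; the package name, function name, and key prefix are illustrative. NewWatcher, Watch, Err, WithPrefix, and WithPrevKV are all defined in the files above.
package example
import (
	"fmt"
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)
// watchConfig prints every event under the "config/" prefix until the
// context is canceled or the watch fails.
func watchConfig(cli *clientv3.Client) error {
	w := clientv3.NewWatcher(cli)
	defer w.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for wr := range w.Watch(ctx, "config/", clientv3.WithPrefix(), clientv3.WithPrevKV()) {
		if err := wr.Err(); err != nil {
			return err // e.g. ErrCompacted if the requested start revision is gone
		}
		for _, ev := range wr.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return nil
}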


@ -1,16 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
package rpctypes
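The error values declared in the next file are what client code compares against once a raw gRPC error has been mapped through rpctypes.Error. A hedged sketch, assuming a constructed *clientv3.Client; the package name, function name, and key are illustrative.
package example
import (
	"log"
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)
// readOldRevision reads at revision 1, which may fail once that revision
// has been compacted away.
func readOldRevision(cli *clientv3.Client) {
	_, err := cli.Get(context.Background(), "some/key", clientv3.WithRev(1))
	switch rpctypes.Error(err) {
	case nil:
		// success
	case rpctypes.ErrCompacted:
		log.Println("revision 1 was compacted; retry at the current revision")
	default:
		log.Println("etcd error:", err)
	}
}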


@ -1,165 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
// server-side error
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out")
ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
grpc.ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
ErrAuthFailed = Error(ErrGRPCAuthFailed)
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
ErrTimeout = Error(ErrGRPCTimeout)
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
ErrUnhealthy = Error(ErrGRPCUnhealthy)
)
// EtcdError defines gRPC server errors.
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
type EtcdError struct {
code codes.Code
desc string
}
// Code returns grpc/codes.Code.
// TODO: define clientv3/codes.Code.
func (e EtcdError) Code() codes.Code {
return e.code
}
func (e EtcdError) Error() string {
return e.desc
}
func Error(err error) error {
if err == nil {
return nil
}
verr, ok := errStringToError[grpc.ErrorDesc(err)]
if !ok { // not gRPC error
return err
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
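// IsCompacted is an illustrative usage sketch for Error/EtcdError; the helper
// name is hypothetical and not part of the upstream file. It reports whether
// err, typically returned from a gRPC call to etcd, maps to the
// compacted-revision error.
func IsCompacted(err error) bool {
	// Error converts the raw gRPC error into the typed client-side error,
	// so it can be compared directly against the exported sentinel values.
	return Error(err) == ErrCompacted
}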

View File

@ -1,20 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,731 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: kv.proto
// DO NOT EDIT!
/*
Package mvccpb is a generated protocol buffer package.
It is generated from these files:
kv.proto
It has these top-level messages:
KeyValue
Event
*/
package mvccpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
io "io"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Event_EventType int32
const (
PUT Event_EventType = 0
DELETE Event_EventType = 1
)
var Event_EventType_name = map[int32]string{
0: "PUT",
1: "DELETE",
}
var Event_EventType_value = map[string]int32{
"PUT": 0,
"DELETE": 1,
}
func (x Event_EventType) String() string {
return proto.EnumName(Event_EventType_name, int32(x))
}
func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
type KeyValue struct {
// key is the key in bytes. An empty key is not allowed.
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
// create_revision is the revision of last creation on this key.
CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
// mod_revision is the revision of last modification on this key.
ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
// version is the version of the key. A deletion resets
// the version to zero and any modification of the key
// increases its version.
Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
// value is the value held by the key, in bytes.
Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
// lease is the ID of the lease that attached to key.
// When the attached lease expires, the key will be deleted.
// If lease is 0, then no lease is attached to the key.
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
}
func (m *KeyValue) Reset() { *m = KeyValue{} }
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
func (*KeyValue) ProtoMessage() {}
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
type Event struct {
// type is the kind of event. If type is a PUT, it indicates
// new data has been stored to the key. If type is a DELETE,
// it indicates the key was deleted.
Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
// kv holds the KeyValue for the event.
// A PUT event contains current kv pair.
// A PUT event with kv.Version=1 indicates the creation of a key.
// A DELETE/EXPIRE event contains the deleted key with
// its modification revision set to the revision of deletion.
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
// prev_kv holds the key-value pair before the event happens.
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
}
func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
func init() {
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
proto.RegisterType((*Event)(nil), "mvccpb.Event")
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
}
func (m *KeyValue) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *KeyValue) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
data[i] = 0xa
i++
i = encodeVarintKv(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if m.CreateRevision != 0 {
data[i] = 0x10
i++
i = encodeVarintKv(data, i, uint64(m.CreateRevision))
}
if m.ModRevision != 0 {
data[i] = 0x18
i++
i = encodeVarintKv(data, i, uint64(m.ModRevision))
}
if m.Version != 0 {
data[i] = 0x20
i++
i = encodeVarintKv(data, i, uint64(m.Version))
}
if len(m.Value) > 0 {
data[i] = 0x2a
i++
i = encodeVarintKv(data, i, uint64(len(m.Value)))
i += copy(data[i:], m.Value)
}
if m.Lease != 0 {
data[i] = 0x30
i++
i = encodeVarintKv(data, i, uint64(m.Lease))
}
return i, nil
}
func (m *Event) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Event) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Type != 0 {
data[i] = 0x8
i++
i = encodeVarintKv(data, i, uint64(m.Type))
}
if m.Kv != nil {
data[i] = 0x12
i++
i = encodeVarintKv(data, i, uint64(m.Kv.Size()))
n1, err := m.Kv.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.PrevKv != nil {
data[i] = 0x1a
i++
i = encodeVarintKv(data, i, uint64(m.PrevKv.Size()))
n2, err := m.PrevKv.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func encodeFixed64Kv(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Kv(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintKv(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *KeyValue) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
if m.CreateRevision != 0 {
n += 1 + sovKv(uint64(m.CreateRevision))
}
if m.ModRevision != 0 {
n += 1 + sovKv(uint64(m.ModRevision))
}
if m.Version != 0 {
n += 1 + sovKv(uint64(m.Version))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
if m.Lease != 0 {
n += 1 + sovKv(uint64(m.Lease))
}
return n
}
func (m *Event) Size() (n int) {
var l int
_ = l
if m.Type != 0 {
n += 1 + sovKv(uint64(m.Type))
}
if m.Kv != nil {
l = m.Kv.Size()
n += 1 + l + sovKv(uint64(l))
}
if m.PrevKv != nil {
l = m.PrevKv.Size()
n += 1 + l + sovKv(uint64(l))
}
return n
}
func sovKv(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozKv(x uint64) (n int) {
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *KeyValue) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
}
m.CreateRevision = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.CreateRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
}
m.ModRevision = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ModRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
m.Version = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Version |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
}
m.Lease = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Lease |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Event) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Event: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Type |= (Event_EventType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Kv == nil {
m.Kv = &KeyValue{}
}
if err := m.Kv.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.PrevKv == nil {
m.PrevKv = &KeyValue{}
}
if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipKv(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthKv
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipKv(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorKv = []byte{
// 303 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
}
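// roundTrip is an illustrative usage sketch; the helper name is hypothetical
// and not part of the generated file. It marshals a KeyValue and unmarshals
// it back, as a caller persisting and reloading events would.
func roundTrip(kv *KeyValue) (*KeyValue, error) {
	// Marshal produces the protobuf wire encoding of the key-value pair.
	data, err := kv.Marshal()
	if err != nil {
		return nil, err
	}
	// Unmarshal decodes the same bytes into a fresh KeyValue.
	out := &KeyValue{}
	if err := out.Unmarshal(data); err != nil {
		return nil, err
	}
	return out, nil
}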

View File

@ -1,22 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package fileutil
import "os"
// OpenDir opens a directory for syncing.
func OpenDir(path string) (*os.File, error) { return os.Open(path) }

View File

@ -1,46 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package fileutil
import (
"os"
"syscall"
)
// OpenDir opens a directory in windows with write access for syncing.
func OpenDir(path string) (*os.File, error) {
fd, err := openDir(path)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(fd), path), nil
}
func openDir(path string) (fd syscall.Handle, err error) {
if len(path) == 0 {
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return syscall.InvalidHandle, err
}
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
createmode := uint32(syscall.OPEN_EXISTING)
fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
}

View File

@ -1,121 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil implements utility functions related to files and paths.
package fileutil
import (
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"github.com/coreos/pkg/capnslog"
)
const (
// PrivateFileMode grants the owner permission to read and write a file.
PrivateFileMode = 0600
// PrivateDirMode grants the owner permission to make/remove files inside the directory.
PrivateDirMode = 0700
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
)
// IsDirWriteable checks whether dir is writable by writing a file to dir
// and then removing it. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
f := path.Join(dir, ".touch")
if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
return err
}
return os.Remove(f)
}
// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(dirpath string) ([]string, error) {
dir, err := os.Open(dirpath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exist. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
// If path is already a directory, MkdirAll does nothing
// and returns nil.
err := os.MkdirAll(dir, PrivateDirMode)
if err != nil {
// if mkdirAll("a/text") and "text" is not
// a directory, this will return syscall.ENOTDIR
return err
}
return IsDirWriteable(dir)
}
// CreateDirAll is similar to TouchDirAll but returns an error
// if the deepest directory is not empty.
func CreateDirAll(dir string) error {
err := TouchDirAll(dir)
if err == nil {
var ns []string
ns, err = ReadDir(dir)
if err != nil {
return err
}
if len(ns) != 0 {
err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
}
}
return err
}
func Exist(name string) bool {
_, err := os.Stat(name)
return err == nil
}
// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
// shorten the length of the file.
func ZeroToEnd(f *os.File) error {
// TODO: support FALLOC_FL_ZERO_RANGE
off, err := f.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}
lenf, lerr := f.Seek(0, os.SEEK_END)
if lerr != nil {
return lerr
}
if err = f.Truncate(off); err != nil {
return err
}
// make sure blocks remain allocated
if err = Preallocate(f, lenf, true); err != nil {
return err
}
_, err = f.Seek(off, os.SEEK_SET)
return err
}
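// prepareDataDir is an illustrative usage sketch; the helper name is
// hypothetical and not part of the upstream file. A brand-new data directory
// must exist and be empty (CreateDirAll), while an existing one only needs to
// exist and be writable (TouchDirAll).
func prepareDataDir(dir string, mustBeEmpty bool) error {
	if mustBeEmpty {
		return CreateDirAll(dir)
	}
	return TouchDirAll(dir)
}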

View File

@ -1,26 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"errors"
"os"
)
var (
ErrLocked = errors.New("fileutil: file already locked")
)
type LockedFile struct{ *os.File }

View File

@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9,!solaris
package fileutil
import (
"os"
"syscall"
)
func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
f.Close()
if err == syscall.EWOULDBLOCK {
err = ErrLocked
}
return nil, err
}
return &LockedFile{f}, nil
}
func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
f.Close()
return nil, err
}
return &LockedFile{f}, err
}

View File

@ -1,96 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// This used to call syscall.Flock() but that call fails with EBADF on NFS.
// An alternative is lockf() which works on NFS but that call lets a process lock
// the same file twice. Instead, use Linux's non-standard open file descriptor
// locks which will block if the process already holds the file lock.
//
// constants from /usr/include/bits/fcntl-linux.h
const (
F_OFD_GETLK = 37
F_OFD_SETLK = 37
F_OFD_SETLKW = 38
)
var (
wrlck = syscall.Flock_t{
Type: syscall.F_WRLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
}
linuxTryLockFile = flockTryLockFile
linuxLockFile = flockLockFile
)
func init() {
// use open file descriptor locks if the system supports it
getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
linuxTryLockFile = ofdTryLockFile
linuxLockFile = ofdLockFile
}
}
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return linuxTryLockFile(path, flag, perm)
}
func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
flock := wrlck
if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
f.Close()
if err == syscall.EWOULDBLOCK {
err = ErrLocked
}
return nil, err
}
return &LockedFile{f}, nil
}
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return linuxLockFile(path, flag, perm)
}
func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
flock := wrlck
err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
if err != nil {
f.Close()
return nil, err
}
return &LockedFile{f}, err
}
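// lockFileExclusive is an illustrative usage sketch; the helper name is
// hypothetical and not part of the upstream file. It creates fpath if needed
// and takes an exclusive, non-blocking lock on it, returning ErrLocked when
// another process already holds the lock.
func lockFileExclusive(fpath string) (*LockedFile, error) {
	return TryLockFile(fpath, os.O_WRONLY|os.O_CREATE, PrivateFileMode)
}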

View File

@ -1,45 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"syscall"
"time"
)
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
return nil, err
}
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, ErrLocked
}
return &LockedFile{f}, nil
}
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
return nil, err
}
for {
f, err := os.OpenFile(path, flag, perm)
if err == nil {
return &LockedFile{f}, nil
}
time.Sleep(10 * time.Millisecond)
}
}

View File

@ -1,62 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build solaris
package fileutil
import (
"os"
"syscall"
)
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
f.Close()
if err == syscall.EAGAIN {
err = ErrLocked
}
return nil, err
}
return &LockedFile{f}, nil
}
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
f, err := os.OpenFile(path, flag, perm)
if err != nil {
return nil, err
}
if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
f.Close()
return nil, err
}
return &LockedFile{f}, nil
}

View File

@ -1,29 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9,!solaris,!linux
package fileutil
import (
"os"
)
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return flockTryLockFile(path, flag, perm)
}
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return flockLockFile(path, flag, perm)
}

View File

@ -1,125 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package fileutil
import (
"errors"
"fmt"
"os"
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
)
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
LOCKFILE_EXCLUSIVE_LOCK = 2
LOCKFILE_FAIL_IMMEDIATELY = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := open(path, flag, perm)
if err != nil {
return nil, err
}
if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
f.Close()
return nil, err
}
return &LockedFile{f}, nil
}
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := open(path, flag, perm)
if err != nil {
return nil, err
}
if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
f.Close()
return nil, err
}
return &LockedFile{f}, nil
}
func open(path string, flag int, perm os.FileMode) (*os.File, error) {
if path == "" {
return nil, fmt.Errorf("cannot open empty filename")
}
var access uint32
switch flag {
case syscall.O_RDONLY:
access = syscall.GENERIC_READ
case syscall.O_WRONLY:
access = syscall.GENERIC_WRITE
case syscall.O_RDWR:
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
case syscall.O_WRONLY | syscall.O_CREAT:
access = syscall.GENERIC_ALL
default:
panic(fmt.Errorf("flag %v is not supported", flag))
}
fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
access,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil,
syscall.OPEN_ALWAYS,
syscall.FILE_ATTRIBUTE_NORMAL,
0)
if err != nil {
return nil, err
}
return os.NewFile(uintptr(fd), path), nil
}
func lockFile(fd syscall.Handle, flags uint32) error {
var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
flag |= flags
if fd == syscall.InvalidHandle {
return nil
}
err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err.Error() == errLocked.Error() {
return ErrLocked
} else if err != errLockViolation {
return err
}
return nil
}
func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
var reserved uint32 = 0
r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@ -1,47 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import "os"
// Preallocate tries to allocate the space for the given
// file. This operation is only supported on Linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
if extendFile {
return preallocExtend(f, sizeInBytes)
}
return preallocFixed(f, sizeInBytes)
}
func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
curOff, err := f.Seek(0, os.SEEK_CUR)
if err != nil {
return err
}
size, err := f.Seek(sizeInBytes, os.SEEK_END)
if err != nil {
return err
}
if _, err = f.Seek(curOff, os.SEEK_SET); err != nil {
return err
}
if sizeInBytes > size {
return nil
}
return f.Truncate(sizeInBytes)
}
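// preallocSegment is an illustrative usage sketch; the helper name is
// hypothetical and not part of the upstream file. It reserves sizeInBytes for
// a freshly created log segment, extending the file so later writes do not
// need to allocate new blocks.
func preallocSegment(f *os.File, sizeInBytes int64) error {
	return Preallocate(f, sizeInBytes, true)
}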

View File

@ -1,43 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin
package fileutil
import (
"os"
"syscall"
"unsafe"
)
func preallocExtend(f *os.File, sizeInBytes int64) error {
if err := preallocFixed(f, sizeInBytes); err != nil {
return err
}
return preallocExtendTrunc(f, sizeInBytes)
}
func preallocFixed(f *os.File, sizeInBytes int64) error {
fstore := &syscall.Fstore_t{
Flags: syscall.F_ALLOCATEALL,
Posmode: syscall.F_PEOFPOSMODE,
Length: sizeInBytes}
p := unsafe.Pointer(fstore)
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
if errno == 0 || errno == syscall.ENOTSUP {
return nil
}
return errno
}

View File

@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
func preallocExtend(f *os.File, sizeInBytes int64) error {
// use mode = 0 to change size
err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
if err != nil {
errno, ok := err.(syscall.Errno)
// not supported; fallback
// fallocate EINTRs frequently in some environments; fallback
if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
return preallocExtendTrunc(f, sizeInBytes)
}
}
return err
}
func preallocFixed(f *os.File, sizeInBytes int64) error {
// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
if err != nil {
errno, ok := err.(syscall.Errno)
// treat not supported as nil error
if ok && errno == syscall.ENOTSUP {
return nil
}
}
return err
}

View File

@ -1,25 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux,!darwin
package fileutil
import "os"
func preallocExtend(f *os.File, sizeInBytes int64) error {
return preallocExtendTrunc(f, sizeInBytes)
}
func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }

View File

@ -1,78 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path"
"sort"
"strings"
"time"
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
return purgeFile(dirname, suffix, max, interval, stop, nil)
}
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
errC := make(chan error, 1)
go func() {
for {
fnames, err := ReadDir(dirname)
if err != nil {
errC <- err
return
}
newfnames := make([]string, 0)
for _, fname := range fnames {
if strings.HasSuffix(fname, suffix) {
newfnames = append(newfnames, fname)
}
}
sort.Strings(newfnames)
fnames = newfnames
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
break
}
if err = os.Remove(f); err != nil {
errC <- err
return
}
if err = l.Close(); err != nil {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
plog.Infof("purged file %s successfully", f)
newfnames = newfnames[1:]
}
if purgec != nil {
for i := 0; i < len(fnames)-len(newfnames); i++ {
purgec <- fnames[i]
}
}
select {
case <-time.After(interval):
case <-stop:
return
}
}
}()
return errC
}
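// purgeSnapshots is an illustrative usage sketch; the helper name and the
// 30-second interval are hypothetical and not part of the upstream file. It
// keeps at most max "*.snap" files in dirname, rescanning periodically until
// stop is closed, and surfaces purge failures on the returned channel.
func purgeSnapshots(dirname string, max uint, stop <-chan struct{}) <-chan error {
	return PurgeFile(dirname, "snap", max, 30*time.Second, stop)
}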

View File

@ -1,29 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux,!darwin
package fileutil
import "os"
// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
func Fsync(f *os.File) error {
return f.Sync()
}
// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
func Fdatasync(f *os.File) error {
return f.Sync()
}

View File

@ -1,40 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin
package fileutil
import (
"os"
"syscall"
)
// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
// may not write it to the persistent media for quite some time, and it may be
// written in an out-of-order sequence. Using F_FULLFSYNC ensures that the
// physical drive's buffer will also get flushed to the media.
func Fsync(f *os.File) error {
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
if errno == 0 {
return nil
}
return errno
}
// Fdatasync on the darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
// on physical drive media.
func Fdatasync(f *os.File) error {
return Fsync(f)
}

View File

@ -1,34 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
func Fsync(f *os.File) error {
return f.Sync()
}
// Fdatasync is similar to fsync(), but does not flush modified metadata
// unless that metadata is needed in order to allow a subsequent data retrieval
// to be correctly handled.
func Fdatasync(f *os.File) error {
return syscall.Fdatasync(int(f.Fd()))
}

View File

@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil
import "path"
// CanonicalURLPath returns the canonical URL path for p, which follows these rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. resolve each '.' and '..' path name element
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root,
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
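// canonicalJoin is an illustrative usage sketch; the helper name and the
// example values are hypothetical and not part of the upstream file. It joins
// a prefix and a relative element and canonicalizes the result, e.g.
// canonicalJoin("/v2/keys//", "./foo/") returns "/v2/keys/foo/".
func canonicalJoin(prefix, elem string) string {
	return CanonicalURLPath(prefix + "/" + elem)
}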

View File

@ -1,16 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tlsutil provides utility functions for handling TLS.
package tlsutil

View File

@ -1,72 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
)
// NewCertPool creates an x509 CertPool from the provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
pemByte, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
for {
var block *pem.Block
block, pemByte = pem.Decode(pemByte)
if block == nil {
break
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certPool.AddCert(cert)
}
}
return certPool, nil
}
// NewCert generates a TLS certificate using the given cert, key, and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
if parseFunc == nil {
parseFunc = tls.X509KeyPair
}
tlsCert, err := parseFunc(cert, key)
if err != nil {
return nil, err
}
return &tlsCert, nil
}
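// loadServerCert is an illustrative usage sketch; the helper name is
// hypothetical and not part of the upstream file. It loads a certificate/key
// pair from disk using the default parser (tls.X509KeyPair is used when
// parseFunc is nil).
func loadServerCert(certfile, keyfile string) (*tls.Certificate, error) {
	return NewCert(certfile, keyfile, nil)
}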

View File

@ -1,17 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport implements various HTTP transport utilities based on Go
// net package.
package transport

View File

@ -1,94 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"crypto/tls"
"fmt"
"net"
"time"
)
type keepAliveConn interface {
SetKeepAlive(bool) error
SetKeepAlivePeriod(d time.Duration) error
}
// NewKeepAliveListener returns a listener that listens on the given address.
// Be careful when wrapping KeepAliveListener with another Listener if TLSInfo is not nil.
// Some packages (like net/http) might expect the Listener to return a TLSConn type to start the TLS handshake.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
if scheme == "https" {
if tlscfg == nil {
return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
}
return newTLSKeepaliveListener(l, tlscfg), nil
}
return &keepaliveListener{
Listener: l,
}, nil
}
type keepaliveListener struct{ net.Listener }
func (kln *keepaliveListener) Accept() (net.Conn, error) {
c, err := kln.Listener.Accept()
if err != nil {
return nil, err
}
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
return c, nil
}
// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
type tlsKeepaliveListener struct {
net.Listener
config *tls.Config
}
// Accept waits for and returns the next incoming TLS connection.
// The returned connection c is a *tls.Conn.
func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
c, err = l.Listener.Accept()
if err != nil {
return
}
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
c = tls.Server(c, l.config)
return
}
// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
// Listener and wraps each connection with tls.Server.
// The configuration config must be non-nil and must have
// at least one certificate.
func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
l := &tlsKeepaliveListener{}
l.Listener = inner
l.config = config
return l
}
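// newHTTPKeepAliveListener is an illustrative usage sketch; the helper name
// is hypothetical and not part of the upstream file. It listens on addr and
// enables TCP keepalive (30s period) on every accepted connection; no TLS
// config is needed for the plain "http" scheme.
func newHTTPKeepAliveListener(addr string) (net.Listener, error) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return NewKeepAliveListener(l, "http", nil)
}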

View File

@ -1,80 +0,0 @@
// Copyright 2013 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport provides network utility functions, complementing the more
// common ones in the net package.
package transport
import (
"errors"
"net"
"sync"
"time"
)
var (
ErrNotTCP = errors.New("only tcp connections have keepalive")
)
// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener.
func LimitListener(l net.Listener, n int) net.Listener {
return &limitListener{l, make(chan struct{}, n)}
}
type limitListener struct {
net.Listener
sem chan struct{}
}
func (l *limitListener) acquire() { l.sem <- struct{}{} }
func (l *limitListener) release() { <-l.sem }
func (l *limitListener) Accept() (net.Conn, error) {
l.acquire()
c, err := l.Listener.Accept()
if err != nil {
l.release()
return nil, err
}
return &limitListenerConn{Conn: c, release: l.release}, nil
}
type limitListenerConn struct {
net.Conn
releaseOnce sync.Once
release func()
}
func (l *limitListenerConn) Close() error {
err := l.Conn.Close()
l.releaseOnce.Do(l.release)
return err
}
func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlive(doKeepAlive)
}
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
return ErrNotTCP
}
return tcpc.SetKeepAlivePeriod(d)
}
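// newLimitedListener is an illustrative usage sketch; the helper name is
// hypothetical and not part of the upstream file. It listens on addr and caps
// simultaneous accepted connections at n; Accept blocks once n connections
// are open until one of them is closed.
func newLimitedListener(addr string, n int) (net.Listener, error) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return LimitListener(l, n), nil
}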

View File

@ -1,274 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net"
"os"
"path"
"strings"
"time"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/tlsutil"
)
func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) {
if l, err = newListener(addr, scheme); err != nil {
return nil, err
}
return wrapTLS(addr, scheme, tlscfg, l)
}
func newListener(addr string, scheme string) (net.Listener, error) {
if scheme == "unix" || scheme == "unixs" {
// unix sockets via unix://laddr
return NewUnixListener(addr)
}
return net.Listen("tcp", addr)
}
func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) {
if scheme != "https" && scheme != "unixs" {
return l, nil
}
if tlscfg == nil {
l.Close()
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr)
}
return tls.NewListener(l, tlscfg), nil
}
type TLSInfo struct {
CertFile string
KeyFile string
CAFile string
TrustedCAFile string
ClientCertAuth bool
// ServerName ensures the cert matches the given host in case of discovery / virtual hosting
ServerName string
selfCert bool
// parseFunc exists to simplify testing. Typically, parseFunc
// should be left nil. In that case, tls.X509KeyPair will be used.
parseFunc func([]byte, []byte) (tls.Certificate, error)
}
func (info TLSInfo) String() string {
return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth)
}
func (info TLSInfo) Empty() bool {
return info.CertFile == "" && info.KeyFile == ""
}
func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) {
if err = fileutil.TouchDirAll(dirpath); err != nil {
return
}
certPath := path.Join(dirpath, "cert.pem")
keyPath := path.Join(dirpath, "key.pem")
_, errcert := os.Stat(certPath)
_, errkey := os.Stat(keyPath)
if errcert == nil && errkey == nil {
info.CertFile = certPath
info.KeyFile = keyPath
info.selfCert = true
return
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return
}
tmpl := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{Organization: []string{"etcd"}},
NotBefore: time.Now(),
NotAfter: time.Now().Add(365 * (24 * time.Hour)),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
for _, host := range hosts {
if ip := net.ParseIP(host); ip != nil {
tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
} else {
tmpl.DNSNames = append(tmpl.DNSNames, strings.Split(host, ":")[0])
}
}
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return
}
derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
if err != nil {
return
}
certOut, err := os.Create(certPath)
if err != nil {
return
}
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
b, err := x509.MarshalECPrivateKey(priv)
if err != nil {
return
}
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return
}
pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
keyOut.Close()
return SelfCert(dirpath, hosts)
}
func (info TLSInfo) baseConfig() (*tls.Config, error) {
if info.KeyFile == "" || info.CertFile == "" {
return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
}
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
if err != nil {
return nil, err
}
cfg := &tls.Config{
Certificates: []tls.Certificate{*tlsCert},
MinVersion: tls.VersionTLS12,
ServerName: info.ServerName,
}
return cfg, nil
}
// cafiles returns a list of CA file paths.
func (info TLSInfo) cafiles() []string {
cs := make([]string, 0)
if info.CAFile != "" {
cs = append(cs, info.CAFile)
}
if info.TrustedCAFile != "" {
cs = append(cs, info.TrustedCAFile)
}
return cs
}
// ServerConfig generates a tls.Config object for use by an HTTP server.
func (info TLSInfo) ServerConfig() (*tls.Config, error) {
cfg, err := info.baseConfig()
if err != nil {
return nil, err
}
cfg.ClientAuth = tls.NoClientCert
if info.CAFile != "" || info.ClientCertAuth {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
}
CAFiles := info.cafiles()
if len(CAFiles) > 0 {
cp, err := tlsutil.NewCertPool(CAFiles)
if err != nil {
return nil, err
}
cfg.ClientCAs = cp
}
// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
cfg.NextProtos = []string{"h2"}
return cfg, nil
}
// ClientConfig generates a tls.Config object for use by an HTTP client.
func (info TLSInfo) ClientConfig() (*tls.Config, error) {
var cfg *tls.Config
var err error
if !info.Empty() {
cfg, err = info.baseConfig()
if err != nil {
return nil, err
}
} else {
cfg = &tls.Config{ServerName: info.ServerName}
}
CAFiles := info.cafiles()
if len(CAFiles) > 0 {
cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles)
if err != nil {
return nil, err
}
// if given a CA, trust any host with a cert signed by the CA
cfg.ServerName = ""
}
if info.selfCert {
cfg.InsecureSkipVerify = true
}
return cfg, nil
}
// ShallowCopyTLSConfig copies *tls.Config. This is only a
// work-around for go-vet tests, which complain:
//
// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex
//
// Keep up-to-date with 'go/src/crypto/tls/common.go'
func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config {
ncfg := tls.Config{
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
SessionTicketKey: cfg.SessionTicketKey,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
return &ncfg
}
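
A hedged sketch (not part of the vendored file) of how TLSInfo.ServerConfig and NewListener above combine to serve HTTPS. It assumes placement in the same transport package; the cert/key paths, listen address, and function name are placeholders.

package transport

import (
	"log"
	"net/http"
)

// exampleServeTLS builds a server-side TLS config from cert/key files and
// listens with it.
func exampleServeTLS() {
	info := TLSInfo{CertFile: "server.crt", KeyFile: "server.key"} // placeholder paths
	cfg, err := info.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	ln, err := NewListener("127.0.0.1:2379", "https", cfg)
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(ln, http.NotFoundHandler()))
}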

View File

@ -1,44 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"time"
)
type timeoutConn struct {
net.Conn
wtimeoutd time.Duration
rdtimeoutd time.Duration
}
func (c timeoutConn) Write(b []byte) (n int, err error) {
if c.wtimeoutd > 0 {
if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil {
return 0, err
}
}
return c.Conn.Write(b)
}
func (c timeoutConn) Read(b []byte) (n int, err error) {
if c.rdtimeoutd > 0 {
if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil {
return 0, err
}
}
return c.Conn.Read(b)
}

View File

@ -1,36 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"time"
)
type rwTimeoutDialer struct {
wtimeoutd time.Duration
rdtimeoutd time.Duration
net.Dialer
}
func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
conn, err := d.Dialer.Dial(network, address)
tconn := &timeoutConn{
rdtimeoutd: d.rdtimeoutd,
wtimeoutd: d.wtimeoutd,
Conn: conn,
}
return tconn, err
}

View File

@ -1,58 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"crypto/tls"
"net"
"time"
)
// NewTimeoutListener returns a listener that listens on the given address.
// If read/write on the accepted connection blocks longer than its time limit,
// it will return timeout error.
func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
ln, err := newListener(addr, scheme)
if err != nil {
return nil, err
}
ln = &rwTimeoutListener{
Listener: ln,
rdtimeoutd: rdtimeoutd,
wtimeoutd: wtimeoutd,
}
if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil {
return nil, err
}
return ln, nil
}
type rwTimeoutListener struct {
net.Listener
wtimeoutd time.Duration
rdtimeoutd time.Duration
}
func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
c, err := rwln.Listener.Accept()
if err != nil {
return nil, err
}
return timeoutConn{
Conn: c,
wtimeoutd: rwln.wtimeoutd,
rdtimeoutd: rwln.rdtimeoutd,
}, nil
}
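
A minimal sketch, assuming the same package as the timeout listener above, of accepting connections whose reads and writes fail after blocking for five seconds. The address, timeouts, and function name are placeholders; a real caller would hand each connection to a worker.

package transport

import (
	"log"
	"time"
)

// exampleTimeoutListener accepts plain TCP connections with 5s read/write deadlines.
func exampleTimeoutListener() {
	ln, err := NewTimeoutListener("127.0.0.1:2380", "http", nil, 5*time.Second, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Print(err)
			continue
		}
		conn.Close() // sketch only: a real server would serve the connection
	}
}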

View File

@ -1,51 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"net/http"
"time"
)
// NewTimeoutTransport returns a transport created using the given TLS info.
// If read/write on the created connection blocks longer than its time limit,
// it will return timeout error.
// If read/write timeout is set, transport will not be able to reuse connection.
func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
tr, err := NewTransport(info, dialtimeoutd)
if err != nil {
return nil, err
}
if rdtimeoutd != 0 || wtimeoutd != 0 {
// the timed-out connection will time out soon after it becomes idle.
// it should not be put back to http transport as an idle connection for future usage.
tr.MaxIdleConnsPerHost = -1
} else {
// allow more idle connections between peers to avoid unnecessary port allocation.
tr.MaxIdleConnsPerHost = 1024
}
tr.Dial = (&rwTimeoutDialer{
Dialer: net.Dialer{
Timeout: dialtimeoutd,
KeepAlive: 30 * time.Second,
},
rdtimeoutd: rdtimeoutd,
wtimeoutd: wtimeoutd,
}).Dial
return tr, nil
}
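
A hedged client-side counterpart to the listener sketches: how NewTimeoutTransport above might back an HTTP client whose per-connection reads and writes abort after one second of blocking. Same-package placement, the URL, and the function name are assumptions.

package transport

import (
	"log"
	"net/http"
	"time"
)

// exampleTimeoutTransport builds a client with 1s read/write deadlines per connection.
func exampleTimeoutTransport() {
	tr, err := NewTimeoutTransport(TLSInfo{}, 2*time.Second, time.Second, time.Second)
	if err != nil {
		log.Fatal(err)
	}
	c := &http.Client{Transport: tr}
	resp, err := c.Get("http://127.0.0.1:2379/health") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}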

View File

@ -1,49 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"fmt"
"strings"
"time"
)
// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those
// endpoints that could be validated as secure.
func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
t, err := NewTransport(tlsInfo, 5*time.Second)
if err != nil {
return nil, err
}
var errs []string
var endpoints []string
for _, ep := range eps {
if !strings.HasPrefix(ep, "https://") {
errs = append(errs, fmt.Sprintf("%q is insecure", ep))
continue
}
conn, cerr := t.Dial("tcp", ep[len("https://"):])
if cerr != nil {
errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
continue
}
conn.Close()
endpoints = append(endpoints, ep)
}
if len(errs) != 0 {
err = fmt.Errorf("%s", strings.Join(errs, ","))
}
return endpoints, err
}
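
A small usage sketch for ValidateSecureEndpoints above (not part of the vendored file): it keeps only the https:// endpoints that can actually be dialed. The CA path, endpoint list, and function name are placeholders, and the call needs network access to succeed.

package transport

import "fmt"

// exampleValidateEndpoints filters a mixed endpoint list down to dialable HTTPS ones.
func exampleValidateEndpoints() {
	info := TLSInfo{CAFile: "ca.crt"} // placeholder CA bundle
	eps := []string{"https://10.0.0.1:2379", "http://10.0.0.2:2379"}
	secure, err := ValidateSecureEndpoints(info, eps)
	if err != nil {
		fmt.Println("rejected endpoints:", err)
	}
	fmt.Println("usable endpoints:", secure)
}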

View File

@ -1,71 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"net/http"
"strings"
"time"
)
type unixTransport struct{ *http.Transport }
func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
cfg, err := info.ClientConfig()
if err != nil {
return nil, err
}
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: dialtimeoutd,
// value taken from http.DefaultTransport
KeepAlive: 30 * time.Second,
}).Dial,
// value taken from http.DefaultTransport
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
}
dialer := (&net.Dialer{
Timeout: dialtimeoutd,
KeepAlive: 30 * time.Second,
})
dial := func(net, addr string) (net.Conn, error) {
return dialer.Dial("unix", addr)
}
tu := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
}
ut := &unixTransport{tu}
t.RegisterProtocol("unix", ut)
t.RegisterProtocol("unixs", ut)
return t, nil
}
func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
url := *req.URL
req.URL = &url
req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
return urt.Transport.RoundTrip(req)
}
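
A hedged sketch of the common HTTPS-client path through NewTransport above, assuming same-package placement; the CA bundle path, endpoint URL, timeouts, and function name are all placeholders.

package transport

import (
	"log"
	"net/http"
	"time"
)

// exampleHTTPSClient dials an etcd-style HTTPS endpoint trusting a custom CA.
func exampleHTTPSClient() {
	info := TLSInfo{CAFile: "ca.crt"} // placeholder CA bundle
	tr, err := NewTransport(info, 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	c := &http.Client{Transport: tr, Timeout: 5 * time.Second}
	resp, err := c.Get("https://127.0.0.1:2379/version") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}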

View File

@ -1,40 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"net"
"os"
)
type unixListener struct{ net.Listener }
func NewUnixListener(addr string) (net.Listener, error) {
if err := os.RemoveAll(addr); err != nil {
return nil, err
}
l, err := net.Listen("unix", addr)
if err != nil {
return nil, err
}
return &unixListener{l}, nil
}
func (ul *unixListener) Close() error {
if err := os.RemoveAll(ul.Addr().String()); err != nil {
return err
}
return ul.Listener.Close()
}

View File

@ -1,17 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package types declares various data types and implements type-checking
// functions.
package types

View File

@ -1,41 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"strconv"
)
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
// base-16 string for input/output
type ID uint64
func (i ID) String() string {
return strconv.FormatUint(uint64(i), 16)
}
// IDFromString attempts to create an ID from a base-16 string.
func IDFromString(s string) (ID, error) {
i, err := strconv.ParseUint(s, 16, 64)
return ID(i), err
}
// IDSlice implements the sort interface
type IDSlice []ID
func (p IDSlice) Len() int { return len(p) }
func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
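
A one-function sketch (hypothetical, same-package) showing the ID type's base-16 round trip described in the comments above.

package types

import "fmt"

// exampleID round-trips an ID through its base-16 string form.
func exampleID() {
	id := ID(0xdeadbeef)
	s := id.String() // "deadbeef"
	back, err := IDFromString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == id) // true
}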

View File

@ -1,178 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"sync"
)
type Set interface {
Add(string)
Remove(string)
Contains(string) bool
Equals(Set) bool
Length() int
Values() []string
Copy() Set
Sub(Set) Set
}
func NewUnsafeSet(values ...string) *unsafeSet {
set := &unsafeSet{make(map[string]struct{})}
for _, v := range values {
set.Add(v)
}
return set
}
func NewThreadsafeSet(values ...string) *tsafeSet {
us := NewUnsafeSet(values...)
return &tsafeSet{us, sync.RWMutex{}}
}
type unsafeSet struct {
d map[string]struct{}
}
// Add adds a new value to the set (no-op if the value is already present)
func (us *unsafeSet) Add(value string) {
us.d[value] = struct{}{}
}
// Remove removes the given value from the set
func (us *unsafeSet) Remove(value string) {
delete(us.d, value)
}
// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
_, exists = us.d[value]
return
}
// ContainsAll returns whether the set contains all given values
func (us *unsafeSet) ContainsAll(values []string) bool {
for _, s := range values {
if !us.Contains(s) {
return false
}
}
return true
}
// Equals returns whether the contents of two sets are identical
func (us *unsafeSet) Equals(other Set) bool {
v1 := sort.StringSlice(us.Values())
v2 := sort.StringSlice(other.Values())
v1.Sort()
v2.Sort()
return reflect.DeepEqual(v1, v2)
}
// Length returns the number of elements in the set
func (us *unsafeSet) Length() int {
return len(us.d)
}
// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() (values []string) {
values = make([]string, 0)
for val := range us.d {
values = append(values, val)
}
return
}
// Copy creates a new Set containing the same values as the original
func (us *unsafeSet) Copy() Set {
cp := NewUnsafeSet()
for val := range us.d {
cp.Add(val)
}
return cp
}
// Sub removes all elements in other from the set
func (us *unsafeSet) Sub(other Set) Set {
oValues := other.Values()
result := us.Copy().(*unsafeSet)
for _, val := range oValues {
if _, ok := result.d[val]; !ok {
continue
}
delete(result.d, val)
}
return result
}
type tsafeSet struct {
us *unsafeSet
m sync.RWMutex
}
func (ts *tsafeSet) Add(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Add(value)
}
func (ts *tsafeSet) Remove(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Remove(value)
}
func (ts *tsafeSet) Contains(value string) (exists bool) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Contains(value)
}
func (ts *tsafeSet) Equals(other Set) bool {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Equals(other)
}
func (ts *tsafeSet) Length() int {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Length()
}
func (ts *tsafeSet) Values() (values []string) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Values()
}
func (ts *tsafeSet) Copy() Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Copy().(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
func (ts *tsafeSet) Sub(other Set) Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Sub(other).(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
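
A brief usage sketch of the thread-safe set wrapper above; the function name and values are placeholders, and it assumes same-package placement.

package types

import "fmt"

// exampleSet exercises NewThreadsafeSet and the Set interface methods.
func exampleSet() {
	s := NewThreadsafeSet("a", "b")
	s.Add("c")
	s.Remove("a")
	fmt.Println(s.Contains("b"), s.Length())           // true 2
	fmt.Println(s.Sub(NewThreadsafeSet("b")).Values()) // [c] (order unspecified)
}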

View File

@ -1,22 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
// Uint64Slice implements the sort interface
type Uint64Slice []uint64
func (p Uint64Slice) Len() int { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

View File

@ -1,82 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"fmt"
"net"
"net/url"
"sort"
"strings"
)
type URLs []url.URL
func NewURLs(strs []string) (URLs, error) {
all := make([]url.URL, len(strs))
if len(all) == 0 {
return nil, errors.New("no valid URLs given")
}
for i, in := range strs {
in = strings.TrimSpace(in)
u, err := url.Parse(in)
if err != nil {
return nil, err
}
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
}
if _, _, err := net.SplitHostPort(u.Host); err != nil {
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
}
if u.Path != "" {
return nil, fmt.Errorf("URL must not contain a path: %s", in)
}
all[i] = *u
}
us := URLs(all)
us.Sort()
return us, nil
}
func MustNewURLs(strs []string) URLs {
urls, err := NewURLs(strs)
if err != nil {
panic(err)
}
return urls
}
func (us URLs) String() string {
return strings.Join(us.StringSlice(), ",")
}
func (us *URLs) Sort() {
sort.Sort(us)
}
func (us URLs) Len() int { return len(us) }
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
func (us URLs) StringSlice() []string {
out := make([]string, len(us))
for i := range us {
out[i] = us[i].String()
}
return out
}
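
A minimal sketch of NewURLs above, assuming same-package placement; the peer URLs are placeholders. Note that the returned slice is sorted.

package types

import "fmt"

// exampleURLs validates peer URLs and prints them in sorted order.
func exampleURLs() {
	us, err := NewURLs([]string{"http://10.0.0.2:2380", "http://10.0.0.1:2380"})
	if err != nil {
		panic(err)
	}
	fmt.Println(us.String()) // http://10.0.0.1:2380,http://10.0.0.2:2380
}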

View File

@ -1,107 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"fmt"
"sort"
"strings"
)
// URLsMap is a map from a name to its URLs.
type URLsMap map[string]URLs
// NewURLsMap returns a URLsMap instantiated from the given string,
// which consists of discovery-formatted names-to-URLs, like:
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
func NewURLsMap(s string) (URLsMap, error) {
m := parse(s)
cl := URLsMap{}
for name, urls := range m {
us, err := NewURLs(urls)
if err != nil {
return nil, err
}
cl[name] = us
}
return cl, nil
}
// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
// string values in the map can be multiple values separated by the sep string.
func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
var err error
um := URLsMap{}
for k, v := range m {
um[k], err = NewURLs(strings.Split(v, sep))
if err != nil {
return nil, err
}
}
return um, nil
}
// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
func (c URLsMap) String() string {
var pairs []string
for name, urls := range c {
for _, url := range urls {
pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
}
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
// URLs returns a list of all URLs.
// The returned list is sorted in ascending lexicographical order.
func (c URLsMap) URLs() []string {
var urls []string
for _, us := range c {
for _, u := range us {
urls = append(urls, u.String())
}
}
sort.Strings(urls)
return urls
}
// Len returns the size of URLsMap.
func (c URLsMap) Len() int {
return len(c)
}
// parse parses the given string and returns a map listing the values specified for each key.
func parse(s string) map[string][]string {
m := make(map[string][]string)
for s != "" {
key := s
if i := strings.IndexAny(key, ","); i >= 0 {
key, s = key[:i], key[i+1:]
} else {
s = ""
}
if key == "" {
continue
}
value := ""
if i := strings.Index(key, "="); i >= 0 {
key, value = key[:i], key[i+1:]
}
m[key] = append(m[key], value)
}
return m
}
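
A short sketch of NewURLsMap above parsing a discovery-formatted cluster string; member names and URLs are placeholders, and same-package placement is assumed.

package types

import "fmt"

// exampleURLsMap parses a name=url,name=url string into per-member URLs.
func exampleURLsMap() {
	m, err := NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len(), m["infra0"].String()) // 2 http://10.0.0.1:2380
}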

View File

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,22 +0,0 @@
package types
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}

View File

@ -1,23 +0,0 @@
package blkiodev
import "fmt"
// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
Path string
Weight uint16
}
func (w *WeightDevice) String() string {
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
Path string
Rate uint64
}
func (t *ThrottleDevice) String() string {
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}

View File

@ -1,235 +0,0 @@
package types
import (
"bufio"
"io"
"net"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/filters"
"github.com/docker/go-units"
)
// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
Stream bool
Stdin bool
Stdout bool
Stderr bool
DetachKeys string
}
// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
Reference string
Comment string
Author string
Changes []string
Pause bool
Config *container.Config
}
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string
ContainerID string
Running bool
ExitCode int
}
// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
Quiet bool
Size bool
All bool
Latest bool
Since string
Before string
Limit int
Filter filters.Args
}
// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
ShowStdout bool
ShowStderr bool
Since string
Timestamps bool
Follow bool
Tail string
Details bool
}
// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
RemoveVolumes bool
RemoveLinks bool
Force bool
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
}
// EventsOptions hold parameters to filter events with.
type EventsOptions struct {
Since string
Until string
Filters filters.Args
}
// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
Filters filters.Args
}
// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
Conn net.Conn
Reader *bufio.Reader
}
// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
h.Conn.Close()
}
// CloseWriter is an interface implemented by structs that can
// close their input streams to prevent further writing.
type CloseWriter interface {
CloseWrite() error
}
// CloseWrite closes a readWriter for writing.
func (h *HijackedResponse) CloseWrite() error {
if conn, ok := h.Conn.(CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
Tags []string
SuppressOutput bool
RemoteContext string
NoCache bool
Remove bool
ForceRemove bool
PullParent bool
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64
CPUQuota int64
CPUPeriod int64
Memory int64
MemorySwap int64
CgroupParent string
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
BuildArgs map[string]string
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
}
// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
Body io.ReadCloser
OSType string
}
// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
}
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
}
// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
Message string // Message is the message to tag the image with
Changes []string // Changes are the raw changes to apply to this image
}
// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
MatchName string
All bool
Filters filters.Args
}
// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
// Body must be closed to avoid a resource leak
Body io.ReadCloser
JSON bool
}
// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
PrivilegeFunc RequestPrivilegeFunc
}
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
Force bool
PruneChildren bool
}
// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
RegistryAuth string
PrivilegeFunc RequestPrivilegeFunc
Filters filters.Args
}
// ImageTagOptions holds parameters to tag an image
type ImageTagOptions struct {
Force bool
}
// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
Height int
Width int
}
// VersionResponse holds version information for the client and the server
type VersionResponse struct {
Client *Version
Server *Version
}
// ServerOK returns true when the client could connect to the docker server
// and parse the information received. It returns false otherwise.
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}

View File

@ -1,53 +0,0 @@
package types
import (
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
)
// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).
// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
Name string
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
AdjustCPUShares bool
}
// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool
}
// ContainerCommitConfig contains build configs for commit operation,
// and is used when making a commit with the current state of the container.
type ContainerCommitConfig struct {
Pause bool
Repo string
Tag string
Author string
Comment string
// merge container config into commit config before commit
MergeConfigs bool
Config *container.Config
}
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Cmd []string // Execution commands and args
}

View File

@ -1,37 +0,0 @@
package container
import (
"github.com/docker/engine-api/types/strslice"
"github.com/docker/go-connections/nat"
)
// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variable to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
}

View File

@ -1,320 +0,0 @@
package container
import (
"strings"
"github.com/docker/engine-api/types/blkiodev"
"github.com/docker/engine-api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
// IsDefault indicates whether the container uses the default isolation technology. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
}
// IpcMode represents the container ipc stack.
type IpcMode string
// IsPrivate indicates whether the container uses its private ipc stack.
func (n IpcMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's ipc stack.
func (n IpcMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's ipc stack.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the ipc stack is valid.
func (n IpcMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UsernsMode represents userns mode in the container.
type UsernsMode string
// IsHost indicates whether the container uses the host's userns.
func (n UsernsMode) IsHost() bool {
return n == "host"
}
// IsPrivate indicates whether the container uses a private userns.
func (n UsernsMode) IsPrivate() bool {
return !(n.IsHost())
}
// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// CgroupSpec represents the cgroup to use for the container.
type CgroupSpec string
// IsContainer indicates whether the container is using another container's cgroup
func (c CgroupSpec) IsContainer() bool {
parts := strings.SplitN(string(c), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
return c.IsContainer() || c == ""
}
// Container returns the name of the container whose cgroup will be used.
func (c CgroupSpec) Container() string {
parts := strings.SplitN(string(c), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UTSMode represents the UTS namespace of the container.
type UTSMode string
// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !(n.IsHost())
}
// IsHost indicates whether the container uses the host's UTS namespace.
func (n UTSMode) IsHost() bool {
return n == "host"
}
// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// PidMode represents the pid namespace of the container.
type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's pid namespace.
func (n PidMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
PathOnHost string
PathInContainer string
CgroupPermissions string
}
// RestartPolicy represents the restart policies of the container.
type RestartPolicy struct {
Name string
MaximumRetryCount int
}
// IsNone indicates whether the container has the "no" restart policy.
// This means the container will not automatically restart when exiting.
func (rp *RestartPolicy) IsNone() bool {
return rp.Name == "no"
}
// IsAlways indicates whether the container has the "always" restart policy.
// This means the container will automatically restart regardless of the exit status.
func (rp *RestartPolicy) IsAlways() bool {
return rp.Name == "always"
}
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
// This means the container will automatically restart if it exits with a non-zero exit status.
func (rp *RestartPolicy) IsOnFailure() bool {
return rp.Name == "on-failure"
}
// IsUnlessStopped indicates whether the container has the
// "unless-stopped" restart policy. This means the container will
// automatically restart unless the user has put it into a stopped state.
func (rp *RestartPolicy) IsUnlessStopped() bool {
return rp.Name == "unless-stopped"
}
// IsSame compares two RestartPolicy to see if they are the same
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
Config map[string]string
}
// Resources contains container's resources (cgroups config, ulimits...)
type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
BlkioWeightDevice []*blkiodev.WeightDevice
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CpusetCpus string // CpusetCpus 0-2, 0,1
CpusetMems string // CpusetMems 0-2, 0,1
Devices []DeviceMapping // List of devices to map inside the container
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit int64 // Setting pids limit for a container
Ulimits []*units.Ulimit // List of ulimits to be set in the container
// Applicable to Windows
CPUCount int64 `json:"CpuCount"` // CPU count
CPUPercent int64 `json:"CpuPercent"` // CPU percent
IOMaximumIOps uint64 // Maximum IOps for the container system drive
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second
}
// UpdateConfig holds the mutable attributes of a Container.
// Those attributes can be updated at runtime.
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}
// HostConfig the non-portable Config structure of a container.
// Here, "non-portable" means "dependent of the host we are running on".
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other container
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS servers to look up
DNSOptions []string `json:"DnsOptions"` // List of DNS resolver options
DNSSearch []string `json:"DnsSearch"` // List of DNS search domains
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Cgroup CgroupSpec // Cgroup to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed port for the container
ReadonlyRootfs bool // Is the container root filesystem mounted read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt map[string]string // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
// Applicable to Windows
ConsoleSize [2]int // Initial console size
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
}
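As a hedged illustration (not part of the removed file), the types above compose directly; the field names are the ones defined in this package, while the concrete values below are assumptions:
hc := HostConfig{
	Binds:         []string{"/data:/data:ro"},
	NetworkMode:   "bridge",
	RestartPolicy: RestartPolicy{Name: "on-failure", MaximumRetryCount: 3},
	Resources: Resources{
		Memory:    512 * 1024 * 1024, // 512 MiB hard limit
		CPUShares: 512,               // relative CPU weight
	},
}
onFailure := hc.RestartPolicy.IsOnFailure() // true: the policy name is "on-failure"
_ = onFailure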

View File

@ -1,81 +0,0 @@
// +build !windows
package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
return "bridge"
} else if n.IsHost() {
return "host"
} else if n.IsContainer() {
return "container"
} else if n.IsNone() {
return "none"
} else if n.IsDefault() {
return "default"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
return n == "bridge"
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// UserDefined returns the name of the user-created network.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
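A hedged sketch of how NetworkName resolves the common mode strings on non-Windows builds, following the branches above (the sample values are assumptions):
modes := []NetworkMode{"bridge", "host", "container:db", "none", "default", "mynet"}
for _, m := range modes {
	_ = m.NetworkName() // "bridge", "host", "container", "none", "default", "mynet"
}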

View File

@ -1,87 +0,0 @@
package container
import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as Windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// on Windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
return n == "nat"
}
// IsHost indicates whether container uses the host network stack.
// returns false as this is not supported by windows
func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer is the id of the container whose network this container is connected to.
// Returns an empty string on Windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
}
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsDefault() {
return "default"
} else if n.IsBridge() {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// UserDefined returns the name of the user-created network.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}

View File

@ -1,295 +0,0 @@
// Package filters provides helper functions to parse and handle command line
// filters, used for example in the docker ps or docker images commands.
package filters
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"github.com/docker/engine-api/types/versions"
)
// Args stores filter arguments as map key:{map key: bool}.
// It contains an aggregation of the map of arguments (which are in the form
// of -f 'key=value') based on the key, and stores values for the same key
// in a map with string keys and boolean values.
// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
type Args struct {
fields map[string]map[string]bool
}
// NewArgs initializes a new Args struct.
func NewArgs() Args {
return Args{fields: map[string]map[string]bool{}}
}
// ParseFlag parses the argument to the filter flag. Like
//
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
//
// If prev map is provided, then it is appended to, and returned. By default a new
// map is created.
func ParseFlag(arg string, prev Args) (Args, error) {
filters := prev
if len(arg) == 0 {
return filters, nil
}
if !strings.Contains(arg, "=") {
return filters, ErrBadFormat
}
f := strings.SplitN(arg, "=", 2)
name := strings.ToLower(strings.TrimSpace(f[0]))
value := strings.TrimSpace(f[1])
filters.Add(name, value)
return filters, nil
}
// ErrBadFormat is an error returned in case of bad format for a filter.
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
// ToParam packs the Args into a string for easy transport from client to server.
func ToParam(a Args) (string, error) {
// this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
buf, err := json.Marshal(a.fields)
if err != nil {
return "", err
}
return string(buf), nil
}
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
// The generated string will depend on the specified version (corresponding to the API version).
func ToParamWithVersion(version string, a Args) (string, error) {
// this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
// for daemons older than v1.10, filter must be of the form map[string][]string
var buf []byte
var err error
if version != "" && versions.LessThan(version, "1.22") {
buf, err = json.Marshal(convertArgsToSlice(a.fields))
} else {
buf, err = json.Marshal(a.fields)
}
if err != nil {
return "", err
}
return string(buf), nil
}
// FromParam unpacks the filter Args.
func FromParam(p string) (Args, error) {
if len(p) == 0 {
return NewArgs(), nil
}
r := strings.NewReader(p)
d := json.NewDecoder(r)
m := map[string]map[string]bool{}
if err := d.Decode(&m); err != nil {
r.Seek(0, 0)
// Allow parsing old arguments in slice format.
// Because other libraries might be sending them in this format.
deprecated := map[string][]string{}
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
m = deprecatedArgs(deprecated)
} else {
return NewArgs(), err
}
}
return Args{m}, nil
}
// Get returns the list of values associated with a field.
// It returns a slice of strings to keep backwards compatibility with old code.
func (filters Args) Get(field string) []string {
values := filters.fields[field]
if values == nil {
return make([]string, 0)
}
slice := make([]string, 0, len(values))
for key := range values {
slice = append(slice, key)
}
return slice
}
// Add adds a new value to a filter field.
func (filters Args) Add(name, value string) {
if _, ok := filters.fields[name]; ok {
filters.fields[name][value] = true
} else {
filters.fields[name] = map[string]bool{value: true}
}
}
// Del removes a value from a filter field.
func (filters Args) Del(name, value string) {
if _, ok := filters.fields[name]; ok {
delete(filters.fields[name], value)
}
}
// Len returns the number of fields in the arguments.
func (filters Args) Len() int {
return len(filters.fields)
}
// MatchKVList returns true if the values for the specified field match the ones
// from the sources.
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
// field is 'label' and sources are {'label1': '1', 'label2': '1'},
// it returns true.
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
fieldValues := filters.fields[field]
// do not filter if there is no filter set or we cannot determine the filter
if len(fieldValues) == 0 {
return true
}
if sources == nil || len(sources) == 0 {
return false
}
for name2match := range fieldValues {
testKV := strings.SplitN(name2match, "=", 2)
v, ok := sources[testKV[0]]
if !ok {
return false
}
if len(testKV) == 2 && testKV[1] != v {
return false
}
}
return true
}
// Match returns true if the values for the specified field match the source string,
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}},
// field is 'image.name' and source is 'ubuntu'
// it returns true.
func (filters Args) Match(field, source string) bool {
if filters.ExactMatch(field, source) {
return true
}
fieldValues := filters.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
continue
}
if match {
return true
}
}
return false
}
// ExactMatch returns true if the source matches exactly one of the filters.
func (filters Args) ExactMatch(field, source string) bool {
fieldValues, ok := filters.fields[field]
// do not filter if there is no filter set or we cannot determine the filter
if !ok || len(fieldValues) == 0 {
return true
}
// try to match full name value to avoid O(N) regular expression matching
if fieldValues[source] {
return true
}
return false
}
// FuzzyMatch returns true if the source matches exactly one of the filters,
// or the source has one of the filters as a prefix.
func (filters Args) FuzzyMatch(field, source string) bool {
if filters.ExactMatch(field, source) {
return true
}
fieldValues := filters.fields[field]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
}
}
return false
}
// Include returns true if the name of the field to filter is in the filters.
func (filters Args) Include(field string) bool {
_, ok := filters.fields[field]
return ok
}
// Validate ensures that all the fields in the filter are valid.
// It returns an error as soon as it finds an invalid field.
func (filters Args) Validate(accepted map[string]bool) error {
for name := range filters.fields {
if !accepted[name] {
return fmt.Errorf("Invalid filter '%s'", name)
}
}
return nil
}
// WalkValues iterates over the list of filtered values for a field.
// It stops the iteration if it finds an error and it returns that error.
func (filters Args) WalkValues(field string, op func(value string) error) error {
if _, ok := filters.fields[field]; !ok {
return nil
}
for v := range filters.fields[field] {
if err := op(v); err != nil {
return err
}
}
return nil
}
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
m := map[string]map[string]bool{}
for k, v := range d {
values := map[string]bool{}
for _, vv := range v {
values[vv] = true
}
m[k] = values
}
return m
}
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
m := map[string][]string{}
for k, v := range f {
values := []string{}
for kk := range v {
if v[kk] {
values = append(values, kk)
}
}
m[k] = values
}
return m
}
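A hedged usage sketch of the Args API defined above; the filter names and values are illustrative assumptions, not taken from the diff:
args := NewArgs()
args.Add("label", "env=prod")
args.Add("image.name", "ubuntu")
args, _ = ParseFlag("created=today", args) // same form as -f 'created=today'
_ = args.Match("image.name", "ubuntu")     // true: exact value match
_ = args.MatchKVList("label", map[string]string{"env": "prod"}) // true: key=value pair matches
param, _ := ToParam(args) // JSON payload for client-to-server transport
_ = param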

View File

@ -1,52 +0,0 @@
package network
// Address represents an IP address
type Address struct {
Addr string
PrefixLen int
}
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string // Per-network IPAM driver options
Config []IPAMConfig
}
// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
Subnet string `json:",omitempty"`
IPRange string `json:",omitempty"`
Gateway string `json:",omitempty"`
AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}
// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
IPv4Address string `json:",omitempty"`
IPv6Address string `json:",omitempty"`
}
// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
// Configurations
IPAMConfig *EndpointIPAMConfig
Links []string
Aliases []string
// Operational data
NetworkID string
EndpointID string
Gateway string
IPAddress string
IPPrefixLen int
IPv6Gateway string
GlobalIPv6Address string
GlobalIPv6PrefixLen int
MacAddress string
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
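For illustration only, a hedged sketch of wiring these structs together; the network name and address below are assumptions:
cfg := NetworkingConfig{
	EndpointsConfig: map[string]*EndpointSettings{
		"frontend": {
			Aliases:    []string{"web"},
			IPAMConfig: &EndpointIPAMConfig{IPv4Address: "172.20.0.10"},
		},
	},
}
_ = cfg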

View File

@ -1,99 +0,0 @@
package registry
import (
"encoding/json"
"net"
)
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
}
// NetIPNet is the net.IPNet type, which can be marshalled to and
// unmarshalled from JSON
type NetIPNet net.IPNet
// MarshalJSON returns the JSON representation of the IPNet
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
return json.Marshal((*net.IPNet)(ipnet).String())
}
// UnmarshalJSON sets the IPNet from a byte array of JSON
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = NetIPNet(*cidr)
}
}
return
}
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
// {
// "Index" : {
// "Name" : "docker.io",
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
// "Secure" : true,
// "Official" : true,
// },
// "RemoteName" : "library/debian",
// "LocalName" : "debian",
// "CanonicalName" : "docker.io/debian"
// "Official" : true,
// }
//
// {
// "Index" : {
// "Name" : "127.0.0.1:5000",
// "Mirrors" : [],
// "Secure" : false,
// "Official" : false,
// },
// "RemoteName" : "user/repo",
// "LocalName" : "127.0.0.1:5000/user/repo",
// "CanonicalName" : "127.0.0.1:5000/user/repo",
// "Official" : false,
// }
type IndexInfo struct {
// Name is the name of the registry, such as "docker.io"
Name string
// Mirrors is a list of mirrors, expressed as URIs
Mirrors []string
// Secure is set to false if the registry is part of the list of
// insecure registries. Insecure registries accept HTTP and/or accept
// HTTPS with certificates from unknown CAs.
Secure bool
// Official indicates whether this is an official registry
Official bool
}
// SearchResult describes a search result returned from a registry
type SearchResult struct {
// StarCount indicates the number of stars this repository has
StarCount int `json:"star_count"`
// IsOfficial is true if the result is from an official repository.
IsOfficial bool `json:"is_official"`
// Name is the name of the repository
Name string `json:"name"`
// IsAutomated indicates whether the result is automated
IsAutomated bool `json:"is_automated"`
// Description is a textual description of the repository
Description string `json:"description"`
}
// SearchResults lists a collection of search results returned from a registry
type SearchResults struct {
// Query contains the query string that generated the search results
Query string `json:"query"`
// NumResults indicates the number of results the query returned
NumResults int `json:"num_results"`
// Results is a slice containing the actual results for the search
Results []SearchResult `json:"results"`
}
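A hedged sketch of the NetIPNet JSON round-trip implemented above; the CIDR value is an assumption:
var n NetIPNet
_ = n.UnmarshalJSON([]byte(`"10.0.0.0/8"`)) // parses the quoted CIDR string
b, _ := n.MarshalJSON()                     // yields `"10.0.0.0/8"` again
_ = b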

Some files were not shown because too many files have changed in this diff.