vendoring dependencies

This commit is contained in:
ngtuna 2016-07-18 23:23:39 +07:00
parent d5414877b5
commit 78feadc695
771 changed files with 129881 additions and 24279 deletions

Godeps/Godeps.json generated (496 lines changed)

@@ -14,6 +14,11 @@
"ImportPath": "github.com/Azure/go-ansiterm/winterm",
"Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
},
{
"ImportPath": "github.com/Microsoft/go-winio",
"Comment": "v0.1.0",
"Rev": "8f9387ea7efabb228a981b9c381142be7667967f"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.8.2",
@@ -96,16 +101,6 @@
"Comment": "v2.1.1-162-gc6c9194",
"Rev": "c6c9194e9c6097f84b0ff468a741086ff7704aa3"
},
{
"ImportPath": "github.com/docker/distribution/manifest",
"Comment": "v2.1.1-162-gc6c9194",
"Rev": "c6c9194e9c6097f84b0ff468a741086ff7704aa3"
},
{
"ImportPath": "github.com/docker/distribution/manifest/schema1",
"Comment": "v2.1.1-162-gc6c9194",
"Rev": "c6c9194e9c6097f84b0ff468a741086ff7704aa3"
},
{
"ImportPath": "github.com/docker/distribution/reference",
"Comment": "v2.1.1-162-gc6c9194",
@@ -151,251 +146,316 @@
"Comment": "v2.1.1-162-gc6c9194",
"Rev": "c6c9194e9c6097f84b0ff468a741086ff7704aa3"
},
{
"ImportPath": "github.com/docker/docker/api/client/bundlefile",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/api/types/backend",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/builder",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/builder/dockerignore",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/cliconfig",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/dockerversion",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/cliconfig/configfile",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/errors",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/daemon/graphdriver",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/image",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/image/v1",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/layer",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/opts",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/archive",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/chrootarchive",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/fileutils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/gitutils",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/homedir",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/httputils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/idtools",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/ioutils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/jsonlog",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/jsonmessage",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/longpath",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/mflag",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/mount",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/nat",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/pkg/plugins",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/parsers",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/parsers/kernel",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/pkg/plugins/transport",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/pools",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/progress",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/promise",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/random",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/reexec",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/signal",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/stdcopy",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/streamformatter",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/stringid",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/stringutils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/symlink",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/system",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/tarsum",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/term",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/term/windows",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/timeutils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/tlsconfig",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/ulimit",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/units",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/urlutil",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/pkg/useragent",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
},
{
"ImportPath": "github.com/docker/docker/pkg/version",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/reference",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/registry",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/runconfig",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/docker/runconfig/opts",
"Comment": "v1.12.0-rc4",
"Rev": "e4a0dbc47232e3a9da4cfe6ce44f250e6e85ed43"
},
{
"ImportPath": "github.com/docker/docker/utils",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/engine-api/client",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/docker/volume",
"Comment": "v1.4.1-7619-g58b270c",
"Rev": "58b270c338e831ac6668a29788c72d202f9fc251"
"ImportPath": "github.com/docker/engine-api/client/transport",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/client/transport/cancellable",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/blkiodev",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/container",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/events",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/filters",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/network",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/reference",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/registry",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/strslice",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/swarm",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/time",
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/engine-api/types/versions",
"Comment": "v0.3.1-62-g3d72d39",
"Rev": "3d72d392d07bece8d7d7b2a3b6b2e57c2df376a2"
"Comment": "v0.3.1-232-g1d24745",
"Rev": "1d247454d4307fb1ddf10d09fd2996394b085904"
},
{
"ImportPath": "github.com/docker/go-connections/nat",
"Comment": "v0.2.0-2-gf549a93",
"Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
},
{
"ImportPath": "github.com/docker/go-connections/sockets",
"Comment": "v0.2.0-2-gf549a93",
"Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
},
{
"ImportPath": "github.com/docker/go-connections/tlsconfig",
"Comment": "v0.2.0-2-gf549a93",
"Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
},
{
"ImportPath": "github.com/docker/go-units",
"Comment": "v0.1.0-21-g0bbddae",
@@ -403,31 +463,78 @@
},
{
"ImportPath": "github.com/docker/libcompose/cli/logger",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/config",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/docker",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/docker/builder",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/docker/client",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/docker/network",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/labels",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/logger",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/lookup",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/project",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/project/events",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/project/options",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/utils",
"Rev": "8dc9ff33a233f3d23293e548a66e4acda3a73f2a"
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libtrust",
"Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a"
"ImportPath": "github.com/docker/libcompose/version",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/docker/libcompose/yaml",
"Comment": "v0.2.0-186-ga12288b",
"Rev": "a12288bd636066e330b5d39197737cd04882d164"
},
{
"ImportPath": "github.com/emicklei/go-restful",
@@ -456,70 +563,6 @@
"ImportPath": "github.com/flynn/go-shlex",
"Rev": "3f9db97f856818214da2e1057f8ad84803971cff"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
"Rev": "39d9fefa6a7fd4ef5a4a02c5f566cb83b73c7293"
},
{
"ImportPath": "github.com/ghodss/yaml",
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
@@ -651,6 +694,37 @@
"Comment": "v1.14.0",
"Rev": "71f57d300dd6a780ac1856c005c4b518cfd498ec"
},
{
"ImportPath": "github.com/vbatts/tar-split/archive/tar",
"Comment": "v0.9.13-6-g28bc4c3",
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
},
{
"ImportPath": "github.com/vbatts/tar-split/tar/asm",
"Comment": "v0.9.13-6-g28bc4c3",
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
},
{
"ImportPath": "github.com/vbatts/tar-split/tar/storage",
"Comment": "v0.9.13-6-g28bc4c3",
"Rev": "28bc4c32f9fa9725118a685c9ddd7ffdbdbfe2c8"
},
{
"ImportPath": "github.com/vdemeester/docker-events",
"Rev": "be74d4929ec1ad118df54349fda4b0cba60f849b"
},
{
"ImportPath": "github.com/xeipuuv/gojsonpointer",
"Rev": "e0fe6f68307607d540ed8eac07a342c33fa1b54a"
},
{
"ImportPath": "github.com/xeipuuv/gojsonreference",
"Rev": "e02fc20de94c78484cd5ffb007f8af96be030a45"
},
{
"ImportPath": "github.com/xeipuuv/gojsonschema",
"Rev": "ac452913faa25c08bb78810d3e6f88b8a39f8f25"
},
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
@@ -671,6 +745,10 @@
"ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/net/proxy",
"Rev": "62685c2d7ca23c807425dca88b11a3e2323dab41"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9"
@@ -691,6 +769,10 @@
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88"


@@ -1,6 +1,6 @@
The MIT License (MIT)

-Copyright (c) 2014 Simon Eskildsen
+Copyright (c) 2015 Microsoft

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +9,14 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

vendor/github.com/Microsoft/go-winio/README.md generated vendored Normal file (15 lines)

@@ -0,0 +1,15 @@
# go-winio

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
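For orientation, a minimal client-side sketch (not part of this commit) of the pipe transport described above; the pipe name is hypothetical and the package only builds on Windows:

```go
package main

import (
	"fmt"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	// DialPipe returns a net.Conn backed by the named pipe.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(`\\.\pipe\example`, &timeout)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Fprintln(conn, "hello") // a net.Conn is an io.Writer
}
```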

vendor/github.com/Microsoft/go-winio/backup.go generated vendored Normal file (241 lines)

@@ -0,0 +1,241 @@
package winio
import (
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
const (
BackupData = uint32(iota + 1)
BackupEaData
BackupSecurity
BackupAlternateData
BackupLink
BackupPropertyData
BackupObjectId
BackupReparseData
BackupSparseBlock
BackupTxfsData
StreamSparseAttributes = uint32(8)
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
Name string // The name of the stream (for BackupAlternateData only).
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
type win32StreamId struct {
StreamId uint32
Attributes uint32
Size uint64
NameSize uint32
}
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
r io.Reader
bytesLeft int64
}
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
return &BackupStreamReader{r, 0}
}
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 {
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err
}
}
var wsi win32StreamId
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
Id: wsi.StreamId,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
if wsi.NameSize != 0 {
name := make([]uint16, int(wsi.NameSize/2))
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
return nil, err
}
hdr.Name = syscall.UTF16ToString(name)
}
if wsi.StreamId == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
hdr.Size -= 8
}
r.bytesLeft = hdr.Size
return hdr, nil
}
// Read reads from the current backup stream.
func (r *BackupStreamReader) Read(b []byte) (int, error) {
if r.bytesLeft == 0 {
return 0, io.EOF
}
if int64(len(b)) > r.bytesLeft {
b = b[:r.bytesLeft]
}
n, err := r.r.Read(b)
r.bytesLeft -= int64(n)
if err == io.EOF {
err = io.ErrUnexpectedEOF
} else if r.bytesLeft == 0 && err == nil {
err = io.EOF
}
return n, err
}
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
type BackupStreamWriter struct {
w io.Writer
bytesLeft int64
}
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
return &BackupStreamWriter{w, 0}
}
// WriteHeader writes the next backup stream header and prepares for calls to Write().
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
if w.bytesLeft != 0 {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
wsi := win32StreamId{
StreamId: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
}
if hdr.Id == BackupSparseBlock {
// Include space for the int64 block offset
wsi.Size += 8
}
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
return err
}
if len(name) != 0 {
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
return err
}
}
if hdr.Id == BackupSparseBlock {
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
return err
}
}
w.bytesLeft = hdr.Size
return nil
}
// Write writes to the current backup stream.
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
if w.bytesLeft < int64(len(b)) {
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
}
n, err := w.w.Write(b)
w.bytesLeft -= int64(n)
return n, err
}
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
r := &BackupFileReader{f, includeSecurity, 0}
runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() })
return r
}
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
}
if bytesRead == 0 {
return 0, io.EOF
}
return int(bytesRead), nil
}
// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
r.ctx = 0
}
return nil
}
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
f *os.File
includeSecurity bool
ctx uintptr
}
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0}
runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
return w
}
// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
}
if int(bytesWritten) != len(b) {
return int(bytesWritten), errors.New("not all bytes could be written")
}
return len(b), nil
}
// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
w.ctx = 0
}
return nil
}
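A hedged usage sketch (not in this commit) tying the pieces above together: enumerate a file's backup streams under SeBackupPrivilege. The path is hypothetical and this assumes the process token actually holds the privilege:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/Microsoft/go-winio"
)

func dumpStreams(path string) error {
	return winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		r := winio.NewBackupFileReader(f, true) // true: also read the security descriptor
		defer r.Close()
		br := winio.NewBackupStreamReader(r)
		for {
			hdr, err := br.Next()
			if err == io.EOF {
				return nil // no more streams
			}
			if err != nil {
				return err
			}
			fmt.Printf("stream id=%d size=%d name=%q\n", hdr.Id, hdr.Size, hdr.Name)
		}
	})
}

func main() {
	if err := dumpStreams(`C:\temp\example.txt`); err != nil {
		fmt.Println(err)
	}
}
```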

vendor/github.com/Microsoft/go-winio/file.go generated vendored Normal file (219 lines)

@@ -0,0 +1,219 @@
package winio
import (
"errors"
"io"
"runtime"
"sync"
"syscall"
"time"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod
const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
)
type timeoutError struct{}
func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
bytes uint32
err error
}
// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
func initIo() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
}
ioCompletionPort = h
go ioCompletionProcessor(h)
}
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
handle syscall.Handle
wg sync.WaitGroup
closing bool
readDeadline time.Time
writeDeadline time.Time
}
// makeWin32File makes a new win32File from an existing file handle
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
ioInitOnce.Do(initIo)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
runtime.SetFinalizer(f, (*win32File).closeHandle)
return f, nil
}
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h)
}
// closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() {
if !f.closing {
// cancel all IO and wait for it to complete
f.closing = true
cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
f.handle = 0
}
}
// Close closes a win32File.
func (f *win32File) Close() error {
f.closeHandle()
runtime.SetFinalizer(f, nil)
return nil
}
// prepareIo prepares for a new IO operation
func (f *win32File) prepareIo() (*ioOperation, error) {
f.wg.Add(1)
if f.closing {
return nil, ErrFileClosed
}
c := &ioOperation{}
c.ch = make(chan ioResult)
return c, nil
}
// ioCompletionProcessor processes completed async IOs forever
func ioCompletionProcessor(h syscall.Handle) {
// Set the timer resolution to 1. This fixes a performance regression in golang 1.6.
timeBeginPeriod(1)
for {
var bytes uint32
var key uintptr
var op *ioOperation
err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
if op == nil {
panic(err)
}
op.ch <- ioResult{bytes, err}
}
}
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) {
if err != syscall.ERROR_IO_PENDING {
f.wg.Done()
return int(bytes), err
} else {
var r ioResult
wait := true
timedout := false
if f.closing {
cancelIoEx(f.handle, &c.o)
} else if !deadline.IsZero() {
now := time.Now()
if !deadline.After(now) {
timedout = true
} else {
timeout := time.After(deadline.Sub(now))
select {
case r = <-c.ch:
wait = false
case <-timeout:
timedout = true
}
}
}
if timedout {
cancelIoEx(f.handle, &c.o)
}
if wait {
r = <-c.ch
}
err = r.err
if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing {
err = ErrFileClosed
} else if timedout {
err = ErrTimeout
}
}
f.wg.Done()
return int(r.bytes), err
}
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
n, err := f.asyncIo(c, f.readDeadline, bytes, err)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
} else if err == syscall.ERROR_BROKEN_PIPE {
return 0, io.EOF
} else {
return n, err
}
}
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
c, err := f.prepareIo()
if err != nil {
return 0, err
}
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
return f.asyncIo(c, f.writeDeadline, bytes, err)
}
func (f *win32File) SetReadDeadline(t time.Time) error {
f.readDeadline = t
return nil
}
func (f *win32File) SetWriteDeadline(t time.Time) error {
f.writeDeadline = t
return nil
}
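A sketch (not part of the commit) of how MakeOpenFile is meant to be fed: the handle must already be opened with FILE_FLAG_OVERLAPPED, since the completion-port machinery above only works for overlapped handles. The path is hypothetical:

```go
package main

import (
	"io"
	"syscall"

	"github.com/Microsoft/go-winio"
)

// openOverlapped opens a file in overlapped mode and wraps the raw handle.
func openOverlapped(path string) (io.ReadWriteCloser, error) {
	name, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return nil, err
	}
	h, err := syscall.CreateFile(name, syscall.GENERIC_READ, 0, nil,
		syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED, 0)
	if err != nil {
		return nil, err
	}
	f, err := winio.MakeOpenFile(h) // takes ownership of h
	if err != nil {
		syscall.Close(h)
		return nil, err
	}
	return f, nil
}
```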

vendor/github.com/Microsoft/go-winio/fileinfo.go generated vendored Normal file (30 lines)

@@ -0,0 +1,30 @@
package winio
import (
"os"
"syscall"
"unsafe"
)
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
}
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return nil, &os.PathError{"GetFileInformationByHandleEx", f.Name(), err}
}
return bi, nil
}
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
if err := setFileInformationByHandle(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
return &os.PathError{"SetFileInformationByHandle", f.Name(), err}
}
return nil
}
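A small sketch (not in this commit) of the intended use: cloning the basic times and attributes from one open file to another. Assumes Windows and two already-opened *os.File values:

```go
package main

import (
	"os"

	"github.com/Microsoft/go-winio"
)

// cloneBasicInfo copies creation/access/write/change times and attributes.
func cloneBasicInfo(src, dst *os.File) error {
	bi, err := winio.GetFileBasicInfo(src)
	if err != nil {
		return err
	}
	return winio.SetFileBasicInfo(dst, bi)
}
```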

vendor/github.com/Microsoft/go-winio/pipe.go generated vendored Normal file (398 lines)

@@ -0,0 +1,398 @@
package winio
import (
"errors"
"io"
"net"
"os"
"syscall"
"time"
"unsafe"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
type securityAttributes struct {
Length uint32
SecurityDescriptor *byte
InheritHandle uint32
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cPIPE_ACCESS_DUPLEX = 0x3
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
cPIPE_UNLIMITED_INSTANCES = 255
cNMPWAIT_USE_DEFAULT_WAIT = 0
cNMPWAIT_NOWAIT = 1
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
)
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
// This error should match net.errClosing since docker takes a dependency on its text.
ErrPipeListenerClosed = errors.New("use of closed network connection")
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
type win32Pipe struct {
*win32File
path string
}
type win32MessageBytePipe struct {
win32Pipe
writeClosed bool
readEOF bool
}
type pipeAddress string
func (f *win32Pipe) LocalAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) RemoteAddr() net.Addr {
return pipeAddress(f.path)
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
f.SetReadDeadline(t)
f.SetWriteDeadline(t)
return nil
}
// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
if f.writeClosed {
return errPipeWriteClosed
}
_, err := f.win32File.Write(nil)
if err != nil {
return err
}
f.writeClosed = true
return nil
}
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
if f.writeClosed {
return 0, errPipeWriteClosed
}
if len(b) == 0 {
return 0, nil
}
return f.win32File.Write(b)
}
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
if f.readEOF {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
if err == io.EOF {
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
}
return n, err
}
func (s pipeAddress) Network() string {
return "pipe"
}
func (s pipeAddress) String() string {
return string(s)
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then the timeout
// is the default timeout established by the pipe server.
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
}
var err error
var h syscall.Handle
for {
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != cERROR_PIPE_BUSY {
break
}
now := time.Now()
var ms uint32
if absTimeout.IsZero() {
ms = cNMPWAIT_USE_DEFAULT_WAIT
} else if now.After(absTimeout) {
ms = cNMPWAIT_NOWAIT
} else {
ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
}
err = waitNamedPipe(path, ms)
if err != nil {
if err == cERROR_SEM_TIMEOUT {
return nil, ErrTimeout
}
break
}
}
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
}
var flags uint32
err = getNamedPipeInfo(h, &flags, nil, nil, nil)
if err != nil {
return nil, err
}
var state uint32
err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0)
if err != nil {
return nil, err
}
if state&cPIPE_READMODE_MESSAGE != 0 {
return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")}
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
return nil, err
}
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
if flags&cPIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
}
return &win32Pipe{win32File: f, path: path}, nil
}
type acceptResponse struct {
f *win32File
err error
}
type win32PipeListener struct {
firstHandle syscall.Handle
path string
securityDescriptor []byte
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
}
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
if first {
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
}
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
if c.MessageMode {
mode |= cPIPE_TYPE_MESSAGE
}
var sa securityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
if securityDescriptor != nil {
sa.SecurityDescriptor = &securityDescriptor[0]
}
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, &sa)
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
return h, nil
}
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
if err != nil {
return nil, err
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
return nil, err
}
return f, nil
}
func (l *win32PipeListener) listenerRoutine() {
closed := false
for !closed {
select {
case <-l.closeCh:
closed = true
case responseCh := <-l.acceptCh:
p, err := l.makeServerPipe()
if err == nil {
// Wait for the client to connect.
ch := make(chan error)
go func() {
ch <- connectPipe(p)
}()
select {
case err = <-ch:
if err != nil {
p.Close()
p = nil
}
case <-l.closeCh:
// Abort the connect request by closing the handle.
p.Close()
p = nil
err = <-ch
if err == nil || err == ErrFileClosed {
err = ErrPipeListenerClosed
}
closed = true
}
}
responseCh <- acceptResponse{p, err}
}
}
syscall.Close(l.firstHandle)
l.firstHandle = 0
// Notify Close() and Accept() callers that the handle has been closed.
close(l.doneCh)
}
// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
// SecurityDescriptor contains a Windows security descriptor in SDDL format.
SecurityDescriptor string
// MessageMode determines whether the pipe is in byte or message mode. In either
// case the pipe is read in byte mode by default. The only practical difference in
// this implementation is that CloseWrite() is only supported for message mode pipes;
// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
// transferred to the reader (and returned as io.EOF in this implementation)
// when the pipe is in message mode.
MessageMode bool
// InputBufferSize specifies the size of the input buffer, in bytes.
InputBufferSize int32
// OutputBufferSize specifies the size of the output buffer, in bytes.
OutputBufferSize int32
}
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
var (
sd []byte
err error
)
if c == nil {
c = &PipeConfig{}
}
if c.SecurityDescriptor != "" {
sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
if err != nil {
return nil, err
}
}
h, err := makeServerPipeHandle(path, sd, c, true)
if err != nil {
return nil, err
}
// Immediately open and then close a client handle so that the named pipe is
// created but not currently accepting connections.
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,
path: path,
securityDescriptor: sd,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
}
go l.listenerRoutine()
return l, nil
}
func connectPipe(p *win32File) error {
c, err := p.prepareIo()
if err != nil {
return err
}
err = connectNamedPipe(p.handle, &c.o)
_, err = p.asyncIo(c, time.Time{}, 0, err)
if err != nil && err != cERROR_PIPE_CONNECTED {
return err
}
return nil
}
func (l *win32PipeListener) Accept() (net.Conn, error) {
ch := make(chan acceptResponse)
select {
case l.acceptCh <- ch:
response := <-ch
err := response.err
if err != nil {
return nil, err
}
if l.config.MessageMode {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: response.f, path: l.path},
}, nil
}
return &win32Pipe{win32File: response.f, path: l.path}, nil
case <-l.doneCh:
return nil, ErrPipeListenerClosed
}
}
func (l *win32PipeListener) Close() error {
select {
case l.closeCh <- 1:
<-l.doneCh
case <-l.doneCh:
}
return nil
}
func (l *win32PipeListener) Addr() net.Addr {
return pipeAddress(l.path)
}
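The server half, as a hedged sketch (not part of this commit): listen on a hypothetical pipe name and serve each connection on its own goroutine. Passing nil for *PipeConfig takes the byte-mode defaults; setting MessageMode gives CloseWrite support as documented above:

```go
package main

import (
	"fmt"
	"net"

	"github.com/Microsoft/go-winio"
)

func main() {
	l, err := winio.ListenPipe(`\\.\pipe\example`, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	for {
		conn, err := l.Accept() // blocks until a client dials
		if err != nil {
			return // ErrPipeListenerClosed once the listener is closed
		}
		go func(c net.Conn) {
			defer c.Close()
			fmt.Fprintln(c, "hello from the pipe server")
		}(conn)
	}
}
```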

vendor/github.com/Microsoft/go-winio/privilege.go generated vendored Normal file (150 lines)

@@ -0,0 +1,150 @@
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"runtime"
"syscall"
"unicode/utf16"
)
//sys adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
SE_PRIVILEGE_ENABLED = 2
ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
SeBackupPrivilege = "SeBackupPrivilege"
SeRestorePrivilege = "SeRestorePrivilege"
)
const (
securityAnonymous = iota
securityIdentification
securityImpersonation
securityDelegation
)
type PrivilegeError struct {
privileges []uint64
}
func (e *PrivilegeError) Error() string {
s := ""
if len(e.privileges) > 1 {
s = "Could not enable privileges "
} else {
s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
s += ", "
}
s += `"`
s += getPrivilegeName(p)
s += `"`
}
return s
}
func RunWithPrivilege(name string, fn func() error) error {
return RunWithPrivileges([]string{name}, fn)
}
func RunWithPrivileges(names []string, fn func() error) error {
var privileges []uint64
for _, name := range names {
p := uint64(0)
err := lookupPrivilegeValue("", name, &p)
if err != nil {
return err
}
privileges = append(privileges, p)
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
token, err := newThreadToken()
if err != nil {
return err
}
defer releaseThreadToken(token)
err = adjustPrivileges(token, privileges)
if err != nil {
return err
}
return fn()
}
func adjustPrivileges(token syscall.Handle, privileges []uint64) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
if !success {
return err
}
if err == ERROR_NOT_ALL_ASSIGNED {
return &PrivilegeError{privileges}
}
return nil
}
func getPrivilegeName(luid uint64) string {
var nameBuffer [256]uint16
bufSize := uint32(len(nameBuffer))
err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
if err != nil {
return fmt.Sprintf("<unknown privilege %d>", luid)
}
var displayNameBuffer [256]uint16
displayBufSize := uint32(len(displayNameBuffer))
var langId uint32
err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langId)
if err != nil {
return fmt.Sprintf("<unknown privilege %s>", utf16.Decode(nameBuffer[:bufSize]))
}
return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}
func newThreadToken() (syscall.Handle, error) {
err := impersonateSelf(securityImpersonation)
if err != nil {
return 0, err
}
var token syscall.Handle
err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
if err != nil {
rerr := revertToSelf()
if rerr != nil {
panic(rerr)
}
return 0, err
}
return token, nil
}
func releaseThreadToken(h syscall.Handle) {
err := revertToSelf()
if err != nil {
panic(err)
}
syscall.Close(h)
}
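A usage sketch (not in this commit): RunWithPrivileges enables the named privileges on the calling OS thread only for the duration of the callback. This assumes the process token already holds the privileges, e.g. an elevated administrator:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	err := winio.RunWithPrivileges(
		[]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege},
		func() error {
			// privileged file operations would go here
			return nil
		})
	if err != nil {
		fmt.Println("privileges:", err) // *PrivilegeError if not all could be enabled
	}
}
```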

vendor/github.com/Microsoft/go-winio/reparse.go generated vendored Normal file (124 lines)

@@ -0,0 +1,124 @@
package winio
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
"unicode/utf16"
"unsafe"
)
const (
reparseTagMountPoint = 0xA0000003
reparseTagSymlink = 0xA000000C
)
type reparseDataBuffer struct {
ReparseTag uint32
ReparseDataLength uint16
Reserved uint16
SubstituteNameOffset uint16
SubstituteNameLength uint16
PrintNameOffset uint16
PrintNameLength uint16
}
// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
Target string
IsMountPoint bool
}
// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
// mount point reparse point.
type UnsupportedReparsePointError struct {
Tag uint32
}
func (e *UnsupportedReparsePointError) Error() string {
return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
isMountPoint := false
tag := binary.LittleEndian.Uint32(b[0:4])
switch tag {
case reparseTagMountPoint:
isMountPoint = true
case reparseTagSymlink:
default:
return nil, &UnsupportedReparsePointError{tag}
}
nameOffset := 16 + binary.LittleEndian.Uint16(b[12:14])
if !isMountPoint {
nameOffset += 4
}
nameLength := binary.LittleEndian.Uint16(b[14:16])
name := make([]uint16, nameLength/2)
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
if err != nil {
return nil, err
}
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}
func isDriveLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
// Generate an NT path and determine if this is a relative path.
var ntTarget string
relative := false
if strings.HasPrefix(rp.Target, `\\?\`) {
ntTarget = rp.Target
} else if strings.HasPrefix(rp.Target, `\\`) {
ntTarget = `\??\UNC\` + rp.Target[2:]
} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
ntTarget = `\??\` + rp.Target
} else {
ntTarget = rp.Target
relative = true
}
// The paths must be NUL-terminated even though they are counted strings.
target16 := utf16.Encode([]rune(rp.Target + "\x00"))
ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
size += len(ntTarget16)*2 + len(target16)*2
tag := uint32(reparseTagMountPoint)
if !rp.IsMountPoint {
tag = reparseTagSymlink
size += 4 // Add room for symlink flags
}
data := reparseDataBuffer{
ReparseTag: tag,
ReparseDataLength: uint16(size),
SubstituteNameOffset: 0,
SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
PrintNameOffset: uint16(len(ntTarget16) * 2),
PrintNameLength: uint16((len(target16) - 1) * 2),
}
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
binary.Write(&b, binary.LittleEndian, flags)
}
binary.Write(&b, binary.LittleEndian, ntTarget16)
binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}
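EncodeReparsePoint and DecodeReparsePoint are symmetric, so a pure-Go round trip (sketch, not part of this commit) exercises both without touching the filesystem; the target path is made up:

```go
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	rp := &winio.ReparsePoint{Target: `C:\target\dir`, IsMountPoint: false}
	buf := winio.EncodeReparsePoint(rp) // REPARSE_DATA_BUFFER bytes
	back, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Target, back.IsMountPoint) // C:\target\dir false
}
```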

vendor/github.com/Microsoft/go-winio/sd.go generated vendored Normal file (96 lines)

@@ -0,0 +1,96 @@
package winio
import (
"syscall"
"unsafe"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
const (
cERROR_NONE_MAPPED = syscall.Errno(1332)
)
type AccountLookupError struct {
Name string
Err error
}
func (e *AccountLookupError) Error() string {
if e.Name == "" {
return "lookup account: empty account name specified"
}
var s string
switch e.Err {
case cERROR_NONE_MAPPED:
s = "not found"
default:
s = e.Err.Error()
}
return "lookup account " + e.Name + ": " + s
}
type SddlConversionError struct {
Sddl string
Err error
}
func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
// LookupSidByName looks up the SID of an account by name
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
refDomainBuffer := make([]uint16, refDomainSize)
err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
if err != nil {
return "", &AccountLookupError{name, err}
}
var strBuffer *uint16
err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
if err != nil {
return "", &AccountLookupError{name, err}
}
sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
localFree(uintptr(unsafe.Pointer(strBuffer)))
return sid, nil
}
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
if err != nil {
return nil, &SddlConversionError{sddl, err}
}
defer localFree(sdBuffer)
sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
return sd, nil
}
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
// The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {
return "", err
}
defer localFree(uintptr(unsafe.Pointer(sddl)))
return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
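A hedged usage sketch for the helpers above (Windows only; the SDDL string and account name are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Round-trip an SDDL string through a binary security descriptor.
	// "D:P(A;;GA;;;WD)" is a protected DACL granting Everyone generic-all access.
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		log.Fatal(err)
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sddl)

	// Resolve a well-known account name to its string SID.
	sid, err := winio.LookupSidByName("SYSTEM")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sid) // e.g. S-1-5-18
}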

vendor/github.com/Microsoft/go-winio/syscall.go generated vendored Normal file

@ -0,0 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go

vendor/github.com/Microsoft/go-winio/zsyscall.go generated vendored Normal file

@ -0,0 +1,492 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package winio
import "unsafe"
import "syscall"
var _ unsafe.Pointer
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
modwinmm = syscall.NewLazyDLL("winmm.dll")
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procLocalFree = modkernel32.NewProc("LocalFree")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
newport = syscall.Handle(r0)
if newport == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) {
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func timeBeginPeriod(period uint32) (n int32) {
r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
n = int32(r0)
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
}
func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func waitNamedPipe(name string, timeout uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _waitNamedPipe(_p0, timeout)
}
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
if err != nil {
return
}
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSidToStringSid(sid *byte, str **uint16) (err error) {
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(str)
if err != nil {
return
}
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func localFree(mem uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
return
}
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
return
}
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
var _p0 uint32
if releaseAll {
_p0 = 1
} else {
_p0 = 0
}
r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
success = r0 != 0
if true {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func impersonateSelf(level uint32) (err error) {
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func revertToSelf() (err error) {
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) {
var _p0 uint32
if openAsSelf {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getCurrentThread() (h syscall.Handle) {
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
h = syscall.Handle(r0)
return
}
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
var _p1 *uint16
_p1, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _lookupPrivilegeValue(_p0, _p1, luid)
}
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeName(_p0, luid, buffer, size)
}
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
if err != nil {
return
}
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
var _p0 *byte
if len(b) > 0 {
_p0 = &b[0]
}
var _p1 uint32
if abort {
_p1 = 1
} else {
_p1 = 0
}
var _p2 uint32
if processSecurity {
_p2 = 1
} else {
_p2 = 0
}
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = error(e1)
} else {
err = syscall.EINVAL
}
}
return
}


@ -1 +0,0 @@
package manifest


@ -1,130 +0,0 @@
package schema1
import (
"encoding/json"
"github.com/docker/distribution/digest"
"github.com/docker/distribution/manifest"
"github.com/docker/libtrust"
)
// TODO(stevvooe): When we rev the manifest format, the contents of this
// package should be moved to manifest/v1.
const (
// ManifestMediaType specifies the mediaType for the current version. Note
// that for schema version 1, the media type is optionally
// "application/json".
ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// packages version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
// Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type Manifest struct {
manifest.Versioned
// Name is the name of the image's repository
Name string `json:"name"`
// Tag is the tag of the image specified by this manifest
Tag string `json:"tag"`
// Architecture is the host architecture on which this image is intended to
// run
Architecture string `json:"architecture"`
// FSLayers is a list of filesystem layer blobSums contained in this image
FSLayers []FSLayer `json:"fsLayers"`
// History is a list of unstructured historical data for v1 compatibility
History []History `json:"history"`
}
// SignedManifest provides an envelope for a signed image manifest, including
// the format-sensitive raw bytes.
type SignedManifest struct {
Manifest
// Raw is the byte representation of the ImageManifest, used for signature
// verification. The value of Raw must be used directly during
// serialization, or the signature check will fail. The manifest byte
// representation cannot change or it will have to be re-signed.
Raw []byte `json:"-"`
}
// UnmarshalJSON populates a new ImageManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
sm.Raw = make([]byte, len(b), len(b))
copy(sm.Raw, b)
p, err := sm.Payload()
if err != nil {
return err
}
var manifest Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return err
}
sm.Manifest = manifest
return nil
}
// Payload returns the raw, signed content of the signed manifest. The
// contents can be used to calculate the content identifier.
func (sm *SignedManifest) Payload() ([]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
if err != nil {
return nil, err
}
// Resolve the payload in the manifest.
return jsig.Payload()
}
// Signatures returns the signatures as provided by
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
// signatures.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
if err != nil {
return nil, err
}
// Resolve the payload in the manifest.
return jsig.Signatures()
}
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
// contents. Applications requiring a marshaled signed manifest should simply
// use Raw directly, since the content produced by json.Marshal will be
// compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
if len(sm.Raw) > 0 {
return sm.Raw, nil
}
// If the raw data is not available, just dump the inner content.
return json.Marshal(&sm.Manifest)
}
// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// History stores unstructured v1 compatibility information
type History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
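This file is removed from the vendor tree by this commit; for reference, a hedged sketch of the round-trip guarantee documented above, written as if inside this package (signedBytes is a hypothetical signed manifest body; assumes fmt, log, and bytes imports):

var sm SignedManifest
if err := sm.UnmarshalJSON(signedBytes); err != nil {
	log.Fatal(err)
}
// MarshalJSON returns Raw verbatim when it is non-empty, so the exact
// signed bytes survive a decode/encode cycle and signature checks still pass.
out, err := sm.MarshalJSON()
if err != nil {
	log.Fatal(err)
}
fmt.Println(bytes.Equal(out, signedBytes)) // true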


@ -1,66 +0,0 @@
package schema1
import (
"crypto/x509"
"encoding/json"
"github.com/docker/libtrust"
)
// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.Sign(pk); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
Raw: pretty,
}, nil
}
// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.SignWithChain(key, chain); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
Raw: pretty,
}, nil
}


@ -1,32 +0,0 @@
package schema1
import (
"crypto/x509"
"github.com/Sirupsen/logrus"
"github.com/docker/libtrust"
)
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
if err != nil {
logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
return nil, err
}
return js.Verify()
}
// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
if err != nil {
return nil, err
}
return js.VerifyChains(ca)
}
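sign.go and verify.go are likewise removed by this commit; for reference, a hedged end-to-end sketch of how they worked together, using a freshly generated libtrust key (repository name and tag are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func main() {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	m := &schema1.Manifest{
		Versioned: schema1.SchemaVersion,
		Name:      "library/hello-world",
		Tag:       "latest",
	}
	sm, err := schema1.Sign(m, pk)
	if err != nil {
		log.Fatal(err)
	}
	keys, err := schema1.Verify(sm)
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println("signed by", k.KeyID())
	}
}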


@ -1,9 +0,0 @@
package manifest
// Versioned provides a struct with just the manifest schemaVersion. Incoming
// content with unknown schema version can be decoded against this struct to
// check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
}


@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
-Copyright 2013-2015 Docker, Inc.
+Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,5 +1,5 @@
Docker
-Copyright 2012-2015 Docker, Inc.
+Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).


@ -0,0 +1,71 @@
// +build experimental
package bundlefile
import (
"encoding/json"
"fmt"
"io"
)
// Bundlefile stores the contents of a bundlefile
type Bundlefile struct {
Version string
Services map[string]Service
}
// Service is a service from a bundlefile
type Service struct {
Image string
Command []string `json:",omitempty"`
Args []string `json:",omitempty"`
Env []string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Ports []Port `json:",omitempty"`
WorkingDir *string `json:",omitempty"`
User *string `json:",omitempty"`
Networks []string `json:",omitempty"`
}
// Port is a port as defined in a bundlefile
type Port struct {
Protocol string
Port uint32
}
// LoadFile loads a bundlefile from a reader
func LoadFile(reader io.Reader) (*Bundlefile, error) {
bundlefile := &Bundlefile{}
decoder := json.NewDecoder(reader)
if err := decoder.Decode(bundlefile); err != nil {
switch jsonErr := err.(type) {
case *json.SyntaxError:
return nil, fmt.Errorf(
"JSON syntax error at byte %v: %s",
jsonErr.Offset,
jsonErr.Error())
case *json.UnmarshalTypeError:
return nil, fmt.Errorf(
"Unexpected type at byte %v. Expected %s but received %s.",
jsonErr.Offset,
jsonErr.Type,
jsonErr.Value)
}
return nil, err
}
return bundlefile, nil
}
// Print writes the contents of the bundlefile to the output writer
// as human readable json
func Print(out io.Writer, bundle *Bundlefile) error {
bytes, err := json.MarshalIndent(*bundle, "", " ")
if err != nil {
return err
}
_, err = out.Write(bytes)
return err
}
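A brief usage sketch for LoadFile and Print (the JSON is a hypothetical bundle; note the file is gated behind the experimental build tag):

package main

import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/api/client/bundlefile"
)

func main() {
	src := `{"Version": "0.1", "Services": {"web": {"Image": "nginx", "Ports": [{"Protocol": "tcp", "Port": 80}]}}}`
	bundle, err := bundlefile.LoadFile(strings.NewReader(src))
	if err != nil {
		log.Fatal(err)
	}
	if err := bundlefile.Print(os.Stdout, bundle); err != nil {
		log.Fatal(err)
	}
}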


@ -0,0 +1,85 @@
// Package backend includes types to send information to server backends.
// TODO(calavera): This package is pending extraction to engine-api
// when the server package is clean of daemon dependencies.
package backend
import (
"io"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/engine-api/types"
)
// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
type ContainerAttachConfig struct {
GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error)
UseStdin bool
UseStdout bool
UseStderr bool
Logs bool
Stream bool
DetachKeys string
// Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly.
// TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
// HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream.
// Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately.
MuxStreams bool
}
// ContainerLogsConfig holds configs for logging operations. Exists
// for users of the backend to pass it a logging configuration.
type ContainerLogsConfig struct {
types.ContainerLogsOptions
OutStream io.Writer
}
// ContainerStatsConfig holds information for configuring the runtime
// behavior of a backend.ContainerStats() call.
type ContainerStatsConfig struct {
Stream bool
OutStream io.Writer
Version string
}
// ExecInspect holds information about a running process started
// with docker exec.
type ExecInspect struct {
ID string
Running bool
ExitCode *int
ProcessConfig *ExecProcessConfig
OpenStdin bool
OpenStderr bool
OpenStdout bool
CanRemove bool
ContainerID string
DetachKeys []byte
}
// ExecProcessConfig holds information about the exec process
// running on the host.
type ExecProcessConfig struct {
Tty bool `json:"tty"`
Entrypoint string `json:"entrypoint"`
Arguments []string `json:"arguments"`
Privileged *bool `json:"privileged,omitempty"`
User string `json:"user,omitempty"`
}
// ContainerCommitConfig is a wrapper around
// types.ContainerCommitConfig that also
// transports configuration changes for a container.
type ContainerCommitConfig struct {
types.ContainerCommitConfig
Changes []string
}
// ProgressWriter is a struct used
// to transport progress streams.
type ProgressWriter struct {
Output io.Writer
StdoutFormatter *streamformatter.StdoutFormatter
StderrFormatter *streamformatter.StderrFormatter
ProgressReaderFunc func(io.ReadCloser) io.ReadCloser
}

vendor/github.com/docker/docker/builder/builder.go generated vendored Normal file

@ -0,0 +1,155 @@
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder
import (
"io"
"os"
"time"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/image"
"github.com/docker/docker/reference"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"golang.org/x/net/context"
)
const (
// DefaultDockerfileName is the default filename with Docker commands, read by docker build
DefaultDockerfileName string = "Dockerfile"
)
// Context represents a file system tree.
type Context interface {
// Close signals that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().
Close() error
// Stat returns an entry corresponding to path, if any.
// Implementations should return an error if path was not found.
// If path is a symlink it also returns the path to the target file.
Stat(path string) (string, FileInfo, error)
// Open opens path from the context and returns a readable stream of it.
Open(path string) (io.ReadCloser, error)
// Walk walks the tree of the context with the function passed to it.
Walk(root string, walkFn WalkFunc) error
}
// WalkFunc is the type of the function called for each file or directory visited by Context.Walk().
type WalkFunc func(path string, fi FileInfo, err error) error
// ModifiableContext represents a modifiable Context.
// TODO: remove this interface once we can get rid of Remove()
type ModifiableContext interface {
Context
// Remove deletes the entry specified by `path`.
// Removing a directory entry also removes all of its subentries.
Remove(path string) error
}
// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file.
// TODO: remove this interface once pkg/archive exposes a walk function that Context can use.
type FileInfo interface {
os.FileInfo
Path() string
}
// PathFileInfo is a convenience struct that implements the FileInfo interface.
type PathFileInfo struct {
os.FileInfo
// FilePath holds the absolute path to the file.
FilePath string
// Name holds the basename for the file.
FileName string
}
// Path returns the absolute path to the file.
func (fi PathFileInfo) Path() string {
return fi.FilePath
}
// Name returns the basename of the file.
func (fi PathFileInfo) Name() string {
if fi.FileName != "" {
return fi.FileName
}
return fi.FileInfo.Name()
}
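A quick illustration of PathFileInfo (the path is hypothetical; assumes the usual os, fmt, and log imports):

fi, err := os.Stat("/tmp/ctx/Dockerfile")
if err != nil {
	log.Fatal(err)
}
pfi := PathFileInfo{FileInfo: fi, FilePath: "/tmp/ctx/Dockerfile"}
fmt.Println(pfi.Name(), "->", pfi.Path()) // Dockerfile -> /tmp/ctx/Dockerfile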
// Hashed defines an extra method intended for implementations of os.FileInfo.
type Hashed interface {
// Hash returns the hash of a file.
Hash() string
SetHash(string)
}
// HashedFileInfo is a convenient struct that augments FileInfo with a hash field.
type HashedFileInfo struct {
FileInfo
// FileHash represents the hash of a file.
FileHash string
}
// Hash returns the hash of a file.
func (fi HashedFileInfo) Hash() string {
return fi.FileHash
}
// SetHash sets the hash of a file.
func (fi *HashedFileInfo) SetHash(h string) {
fi.FileHash = h
}
// Backend abstracts calls to a Docker Daemon.
type Backend interface {
// TODO: use digest reference instead of name
// GetImageOnBuild looks up a Docker image referenced by `name`.
GetImageOnBuild(name string) (Image, error)
// TagImageWithReference tags an image with the given reference
TagImageWithReference(image.ID, reference.Named) error
// PullOnBuild tells Docker to pull image referenced by `name`.
PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
// ContainerAttachRaw attaches to container.
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
// ContainerCreate creates a new Docker container and returns potential warnings
ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error)
// ContainerRm removes a container specified by `id`.
ContainerRm(name string, config *types.ContainerRmConfig) error
// Commit creates a new Docker image from an existing Docker container.
Commit(string, *backend.ContainerCommitConfig) (string, error)
// ContainerKill stops the container execution abruptly.
ContainerKill(containerID string, sig uint64) error
// ContainerStart starts a new container
ContainerStart(containerID string, hostConfig *container.HostConfig, validateHostname bool) error
// ContainerWait stops processing until the given container is stopped.
ContainerWait(containerID string, timeout time.Duration) (int, error)
// ContainerUpdateCmdOnBuild updates container.Path and container.Args
ContainerUpdateCmdOnBuild(containerID string, cmd []string) error
// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
// specified by a container object.
// TODO: make an Extract method instead of passing `decompress`
// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
// with Context.Walk
//ContainerCopy(name string, res string) (io.ReadCloser, error)
// TODO: use copyBackend api
CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
}
// Image represents a Docker image used by the builder.
type Image interface {
ImageID() string
RunConfig() *container.Config
}
// ImageCache abstracts an image cache store.
// (parent image, child runconfig) -> child image
type ImageCache interface {
// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parentID`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error)
}
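For illustration, a do-nothing ImageCache that always misses could look like this (noCache is our hypothetical type; per the contract above, a miss is an empty ID with a nil error):

type noCache struct{}

func (noCache) GetCachedImageOnBuild(parentID string, cfg *container.Config) (string, error) {
	return "", nil // empty ID and nil error signal a cache miss
}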

vendor/github.com/docker/docker/builder/context.go generated vendored Normal file

@ -0,0 +1,260 @@
package builder
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/gitutils"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
)
// ValidateContextDirectory checks if all the contents of the directory
// can be read and returns an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error.
func ValidateContextDirectory(srcPath string, excludes []string) error {
contextRoot, err := getContextRoot(srcPath)
if err != nil {
return err
}
return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("can't stat '%s'", filePath)
}
if os.IsNotExist(err) {
return nil
}
return err
}
// skip this directory/file if it's not in the path; it won't get added to the context
if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
return err
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
return err
} else if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
// skip checking if symlinks point to non-existing files; such symlinks can be useful
// also skip named pipes, because they would hang on open
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
return nil
}
if !f.IsDir() {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
return fmt.Errorf("no permission to read from '%s'", filePath)
}
currentFile.Close()
}
return nil
})
}
// GetContextFromReader will read the contents of the given reader as either a
// Dockerfile or tar archive. Returns a tar archive used as a context and a
// path to the Dockerfile inside the tar.
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
buf := bufio.NewReader(r)
magic, err := buf.Peek(archive.HeaderSize)
if err != nil && err != io.EOF {
return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
}
if archive.IsArchive(magic) {
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
}
// Input should be read as a Dockerfile.
tmpDir, err := ioutil.TempDir("", "docker-build-context-")
if err != nil {
return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err)
}
f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName))
if err != nil {
return nil, "", err
}
_, err = io.Copy(f, buf)
if err != nil {
f.Close()
return nil, "", err
}
if err := f.Close(); err != nil {
return nil, "", err
}
if err := r.Close(); err != nil {
return nil, "", err
}
tar, err := archive.Tar(tmpDir, archive.Uncompressed)
if err != nil {
return nil, "", err
}
return ioutils.NewReadCloserWrapper(tar, func() error {
err := tar.Close()
os.RemoveAll(tmpDir)
return err
}), DefaultDockerfileName, nil
}
// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
// git repo is cloned into a temporary directory used as the context directory.
// Returns the absolute path to the temporary context directory, the relative
// path of the dockerfile in that context directory, and a non-nil error on
// failure.
func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
if _, err := exec.LookPath("git"); err != nil {
return "", "", fmt.Errorf("unable to find 'git': %v", err)
}
if absContextDir, err = gitutils.Clone(gitURL); err != nil {
return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err)
}
return getDockerfileRelPath(absContextDir, dockerfileName)
}
// GetContextFromURL uses a remote URL as context for a `docker build`. The
// remote resource is downloaded as either a Dockerfile or a tar archive.
// Returns the tar archive used for the context and a path of the
// dockerfile inside the tar.
func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
response, err := httputils.Download(remoteURL)
if err != nil {
return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
}
progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true)
// Pass the response body through a progress reader.
progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
}
// GetContextFromLocalDir uses the given local directory as context for a
// `docker build`. Returns the absolute path to the local context directory,
// the relative path of the dockerfile in that context directory, and a non-nil
// error on failure.
func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) {
// When using a local context directory, when the Dockerfile is specified
// with the `-f/--file` option then it is considered relative to the
// current directory and not the context directory.
if dockerfileName != "" {
if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err)
}
}
return getDockerfileRelPath(localDir, dockerfileName)
}
// getDockerfileRelPath uses the given context directory for a `docker build`
// and returns the absolute path to the context directory, the relative path of
// the dockerfile in that context directory, and a non-nil error on failure.
func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err)
}
// The context dir might be a symbolic link, so follow it to the actual
// target directory.
//
// FIXME. We use isUNC (always false on non-Windows platforms) to work around
// an issue in golang. On Windows, EvalSymlinks does not work on UNC file
// paths (those starting with \\). This hack means that when using links
// on UNC paths, they will not be followed.
if !isUNC(absContextDir) {
absContextDir, err = filepath.EvalSymlinks(absContextDir)
if err != nil {
return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
}
}
stat, err := os.Lstat(absContextDir)
if err != nil {
return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
}
if !stat.IsDir() {
return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
}
absDockerfile := givenDockerfile
if absDockerfile == "" {
// No -f/--file was specified so use the default relative to the
// context directory.
absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName)
// Just to be nice ;-) look for 'dockerfile' too but only
// use it if we found it, otherwise ignore this check
if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName))
if _, err = os.Lstat(altPath); err == nil {
absDockerfile = altPath
}
}
}
// If not already an absolute path, the Dockerfile path should be joined to
// the base directory.
if !filepath.IsAbs(absDockerfile) {
absDockerfile = filepath.Join(absContextDir, absDockerfile)
}
// Evaluate symlinks in the path to the Dockerfile too.
//
// FIXME. We use isUNC (always false on non-Windows platforms) to work around
// an issue in golang. On Windows, EvalSymlinks does not work on UNC file
// paths (those starting with \\). This hack means that when using links
// on UNC paths, they will not be followed.
if !isUNC(absDockerfile) {
absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
if err != nil {
return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
}
}
if _, err := os.Lstat(absDockerfile); err != nil {
if os.IsNotExist(err) {
return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
}
return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err)
}
if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err)
}
if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
}
return absContextDir, relDockerfile, nil
}
// isUNC returns true if the path is UNC (one starting \\). It always returns
// false on non-Windows platforms.
func isUNC(path string) bool {
return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
}
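A hedged usage sketch for GetContextFromReader, feeding it a plain Dockerfile rather than a tar archive; the function wraps it into a single-file tar context on the fly:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/docker/docker/builder"
)

func main() {
	r := ioutil.NopCloser(strings.NewReader("FROM scratch\n"))
	tarCtx, relDockerfile, err := builder.GetContextFromReader(r, "Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	defer tarCtx.Close()
	fmt.Println("Dockerfile inside the context:", relDockerfile)
}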


@ -1,6 +1,6 @@
// +build !windows
-package utils
+package builder
import (
"path/filepath"


@ -1,6 +1,6 @@
// +build windows
-package utils
+package builder
import (
"path/filepath"


@ -0,0 +1,47 @@
package builder
import (
"os"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/pkg/fileutils"
)
// DockerIgnoreContext wraps a ModifiableContext to add a method
// for handling the .dockerignore file at the root of the context.
type DockerIgnoreContext struct {
ModifiableContext
}
// Process reads the .dockerignore file at the root of the embedded context.
// If .dockerignore does not exist in the context, then nil is returned.
//
// It can take a list of files to be removed after .dockerignore is removed.
// This is used for server-side implementations of builders that need to send
// the .dockerignore file as well as the special files specified in filesToRemove,
// but expect them to be excluded from the context after they were processed.
//
// For example, server-side Dockerfile builders are expected to pass in the name
// of the Dockerfile to be removed after it was parsed.
//
// TODO: Don't require a ModifiableContext (use Context instead) and don't remove
// files, instead handle a list of files to be excluded from the context.
func (c DockerIgnoreContext) Process(filesToRemove []string) error {
f, err := c.Open(".dockerignore")
// Note that a missing .dockerignore file isn't treated as an error
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
excludes, _ := dockerignore.ReadAll(f)
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
for _, fileToRemove := range filesToRemove {
rm, _ := fileutils.Matches(fileToRemove, excludes)
if rm {
c.Remove(fileToRemove)
}
}
return nil
}
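A sketch of how a server-side builder might use DockerIgnoreContext (tarStream is assumed to be an uncompressed tar of the build directory; MakeTarSumContext is defined later in this package):

c, err := MakeTarSumContext(tarStream)
if err != nil {
	return err
}
defer c.Close()
dic := DockerIgnoreContext{ModifiableContext: c}
// Also drop the Dockerfile itself if .dockerignore excludes it.
if err := dic.Process([]string{"Dockerfile"}); err != nil {
	return err
}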


@ -0,0 +1,49 @@
package dockerignore
import (
"bufio"
"bytes"
"fmt"
"io"
"path/filepath"
"strings"
)
// ReadAll reads a .dockerignore file and returns the list of file patterns
// to ignore. Note this will trim whitespace from each line as well
// as use Go's "clean" func to get the shortest/cleanest path for each.
func ReadAll(reader io.ReadCloser) ([]string, error) {
if reader == nil {
return nil, nil
}
defer reader.Close()
scanner := bufio.NewScanner(reader)
var excludes []string
currentLine := 0
utf8bom := []byte{0xEF, 0xBB, 0xBF}
for scanner.Scan() {
scannedBytes := scanner.Bytes()
// We trim UTF8 BOM
if currentLine == 0 {
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
}
pattern := string(scannedBytes)
currentLine++
// Lines starting with # (comments) are ignored before processing
if strings.HasPrefix(pattern, "#") {
continue
}
pattern = strings.TrimSpace(pattern)
if pattern == "" {
continue
}
pattern = filepath.Clean(pattern)
pattern = filepath.ToSlash(pattern)
excludes = append(excludes, pattern)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
}
return excludes, nil
}
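A short sketch of ReadAll in action; comments and blank lines are dropped, and "./build/" is cleaned to "build":

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	f := ioutil.NopCloser(strings.NewReader("# comment\nnode_modules\n\n./build/\n"))
	patterns, err := dockerignore.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(patterns) // [node_modules build]
}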

vendor/github.com/docker/docker/builder/git.go generated vendored Normal file

@ -0,0 +1,28 @@
package builder
import (
"os"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/gitutils"
)
// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
func MakeGitContext(gitURL string) (ModifiableContext, error) {
root, err := gitutils.Clone(gitURL)
if err != nil {
return nil, err
}
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return nil, err
}
defer func() {
// TODO: print errors?
c.Close()
os.RemoveAll(root)
}()
return MakeTarSumContext(c)
}

vendor/github.com/docker/docker/builder/remote.go generated vendored Normal file

@ -0,0 +1,152 @@
package builder
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"regexp"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/urlutil"
)
// When downloading remote contexts, limit the amount (in bytes)
// to be read from the response body in order to detect its Content-Type
const maxPreambleLength = 100
const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`
var mimeRe = regexp.MustCompile(acceptableRemoteMIME)
// MakeRemoteContext downloads a context from remoteURL and returns it.
//
// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of
// maxPreambleLength bytes from the body to help detecting the MIME type.
// Look at acceptableRemoteMIME for more details.
//
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned.
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) {
f, err := httputils.Download(remoteURL)
if err != nil {
return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err)
}
defer f.Body.Close()
var contextReader io.ReadCloser
if contentTypeHandlers != nil {
contentType := f.Header.Get("Content-Type")
clen := f.ContentLength
contentType, contextReader, err = inspectResponse(contentType, f.Body, clen)
if err != nil {
return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err)
}
defer contextReader.Close()
// This loop tries to find a content-type handler for the detected content-type.
// If it could not find one from the caller-supplied map, it tries the empty content-type `""`
// which is interpreted as a fallback handler (usually used for raw tar contexts).
for _, ct := range []string{contentType, ""} {
if fn, ok := contentTypeHandlers[ct]; ok {
defer contextReader.Close()
if contextReader, err = fn(contextReader); err != nil {
return nil, err
}
break
}
}
}
// Pass through - this is a pre-packaged context, presumably
// with a Dockerfile with the right name inside it.
return MakeTarSumContext(contextReader)
}
// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
// irrespective of user input.
// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) {
switch {
case remoteURL == "":
context, err = MakeTarSumContext(r)
case urlutil.IsGitURL(remoteURL):
context, err = MakeGitContext(remoteURL)
case urlutil.IsURL(remoteURL):
context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
dockerfile, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
dockerfileName = DefaultDockerfileName
// TODO: return a context without tarsum
return archive.Generate(dockerfileName, string(dockerfile))
},
// fallback handler (tar context)
"": func(rc io.ReadCloser) (io.ReadCloser, error) {
return createProgressReader(rc), nil
},
})
default:
err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
}
return
}
// inspectResponse looks into the http response data at r to determine whether its
// content-type is on the list of acceptable content types for remote build contexts.
// This function returns:
// - a string representation of the detected content-type
// - an io.Reader for the response body
// - an error value which will be non-nil either when something goes wrong while
// reading bytes from r or when the detected content-type is not acceptable.
func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
plen := clen
if plen <= 0 || plen > maxPreambleLength {
plen = maxPreambleLength
}
preamble := make([]byte, plen, plen)
rlen, err := r.Read(preamble)
if rlen == 0 {
return ct, r, errors.New("Empty response")
}
if err != nil && err != io.EOF {
return ct, r, err
}
preambleR := bytes.NewReader(preamble)
bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
// Some web servers will use application/octet-stream as the default
// content type for files without an extension (e.g. 'Dockerfile')
// so if we receive this value we better check for text content
contentType := ct
if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
contentType, _, err = httputils.DetectContentType(preamble)
if err != nil {
return contentType, bodyReader, err
}
}
contentType = selectAcceptableMIME(contentType)
var cterr error
if len(contentType) == 0 {
cterr = fmt.Errorf("unsupported Content-Type %q", ct)
contentType = ct
}
return contentType, bodyReader, cterr
}
func selectAcceptableMIME(ct string) string {
return mimeRe.FindString(ct)
}
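To illustrate the MIME filter, a few hypothetical inputs and what the regexp's FindString yields within this package (assumes an fmt import):

fmt.Println(selectAcceptableMIME("application/x-tar"))         // "application/x-tar"
fmt.Println(selectAcceptableMIME("text/plain; charset=utf-8")) // "text/plain"
fmt.Println(selectAcceptableMIME("text/html"))                 // "" (rejected)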

vendor/github.com/docker/docker/builder/tarsum.go generated vendored Normal file

@ -0,0 +1,158 @@
package builder
import (
"fmt"
"io"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum"
)
type tarSumContext struct {
root string
sums tarsum.FileInfoSums
}
func (c *tarSumContext) Close() error {
return os.RemoveAll(c.root)
}
func convertPathError(err error, cleanpath string) error {
if err, ok := err.(*os.PathError); ok {
err.Path = cleanpath
return err
}
return err
}
func (c *tarSumContext) Open(path string) (io.ReadCloser, error) {
cleanpath, fullpath, err := c.normalize(path)
if err != nil {
return nil, err
}
r, err := os.Open(fullpath)
if err != nil {
return nil, convertPathError(err, cleanpath)
}
return r, nil
}
func (c *tarSumContext) Stat(path string) (string, FileInfo, error) {
cleanpath, fullpath, err := c.normalize(path)
if err != nil {
return "", nil, err
}
st, err := os.Lstat(fullpath)
if err != nil {
return "", nil, convertPathError(err, cleanpath)
}
rel, err := filepath.Rel(c.root, fullpath)
if err != nil {
return "", nil, convertPathError(err, cleanpath)
}
// We set sum to path by default for the case where GetFile returns nil.
// The usual case is when the relative path is empty.
sum := path
// Use the checksum of the followed path (not the possible symlink) because
// this is the file that is actually copied.
if tsInfo := c.sums.GetFile(rel); tsInfo != nil {
sum = tsInfo.Sum()
}
fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum}
return rel, fi, nil
}
// MakeTarSumContext returns a build Context from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
root, err := ioutils.TempDir("", "docker-builder")
if err != nil {
return nil, err
}
tsc := &tarSumContext{root: root}
// Make sure we clean up upon error. In the happy case the caller
// is expected to manage the clean-up
defer func() {
if err != nil {
tsc.Close()
}
}()
decompressedStream, err := archive.DecompressStream(tarStream)
if err != nil {
return nil, err
}
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
if err != nil {
return nil, err
}
if err := chrootarchive.Untar(sum, root, nil); err != nil {
return nil, err
}
tsc.sums = sum.GetSums()
return tsc, nil
}
func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) {
cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root)
if err != nil {
return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath)
}
_, err = os.Lstat(fullpath)
if err != nil {
return "", "", convertPathError(err, path)
}
return
}
func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error {
root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root))
return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error {
rel, err := filepath.Rel(c.root, fullpath)
if err != nil {
return err
}
if rel == "." {
return nil
}
sum := rel
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
sum = tsInfo.Sum()
}
fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum}
if err := walkFn(rel, fi, nil); err != nil {
return err
}
return nil
})
}
func (c *tarSumContext) Remove(path string) error {
_, fullpath, err := c.normalize(path)
if err != nil {
return err
}
return os.RemoveAll(fullpath)
}
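Finally, a hedged end-to-end sketch: tar a local directory, build a tarsum Context from the stream, and walk it to read per-file hashes (the directory path is hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/pkg/archive"
)

func main() {
	tarStream, err := archive.Tar("/tmp/buildctx", archive.Uncompressed)
	if err != nil {
		log.Fatal(err)
	}
	defer tarStream.Close()

	c, err := builder.MakeTarSumContext(tarStream)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Each FileInfo yielded by Walk carries the tarsum-backed hash.
	err = c.Walk("", func(path string, fi builder.FileInfo, err error) error {
		fmt.Println(path, fi.(builder.Hashed).Hash())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}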


@ -1,27 +1,21 @@
package cliconfig
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/cliconfig/configfile"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/engine-api/types"
)
const (
// ConfigFileName is the name of config file
ConfigFileName = "config.json"
configFileDir = ".docker"
oldConfigfile = ".dockercfg"
// This constant is only used for really old config files when the
// URL wasn't saved as part of the config file and it was just
// assumed to be this value.
defaultIndexserver = "https://index.docker.io/v1/"
)
var (
@ -30,7 +24,7 @@ var (
func init() {
if configDir == "" {
configDir = filepath.Join(homedir.Get(), ".docker")
configDir = filepath.Join(homedir.Get(), configFileDir)
}
}
@ -44,99 +38,20 @@ func SetConfigDir(dir string) {
configDir = dir
}
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth"`
Email string `json:"email"`
ServerAddress string `json:"serveraddress,omitempty"`
}
// ConfigFile ~/.docker/config.json file info
type ConfigFile struct {
AuthConfigs map[string]AuthConfig `json:"auths"`
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
PsFormat string `json:"psFormat,omitempty"`
filename string // Note: not serialized - for internal use only
}
// NewConfigFile initilizes an empty configuration file for the given filename 'fn'
func NewConfigFile(fn string) *ConfigFile {
return &ConfigFile{
AuthConfigs: make(map[string]AuthConfig),
// NewConfigFile initializes an empty configuration file for the given filename 'fn'
func NewConfigFile(fn string) *configfile.ConfigFile {
return &configfile.ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
HTTPHeaders: make(map[string]string),
filename: fn,
Filename: fn,
}
}
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
// auth config information with given directory and populates the receiver object
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
b, err := ioutil.ReadAll(configData)
if err != nil {
return err
}
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return fmt.Errorf("The Auth config file is empty")
}
authConfig := AuthConfig{}
origAuth := strings.Split(arr[0], " = ")
if len(origAuth) != 2 {
return fmt.Errorf("Invalid Auth config file")
}
authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1])
if err != nil {
return err
}
origEmail := strings.Split(arr[1], " = ")
if len(origEmail) != 2 {
return fmt.Errorf("Invalid Auth config file")
}
authConfig.Email = origEmail[1]
authConfig.ServerAddress = defaultIndexserver
configFile.AuthConfigs[defaultIndexserver] = authConfig
} else {
for k, authConfig := range configFile.AuthConfigs {
authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth)
if err != nil {
return err
}
authConfig.Auth = ""
authConfig.ServerAddress = k
configFile.AuthConfigs[k] = authConfig
}
}
return nil
}
// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
return err
}
var err error
for addr, ac := range configFile.AuthConfigs {
ac.Username, ac.Password, err = DecodeAuth(ac.Auth)
if err != nil {
return err
}
ac.Auth = ""
ac.ServerAddress = addr
configFile.AuthConfigs[addr] = ac
}
return nil
}
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
// a non-nested reader
func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) {
configFile := ConfigFile{
AuthConfigs: make(map[string]AuthConfig),
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
configFile := configfile.ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
}
err := configFile.LegacyLoadFromReader(configData)
return &configFile, err
@ -144,40 +59,43 @@ func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) {
// LoadFromReader is a convenience function that creates a ConfigFile object from
// a reader
func LoadFromReader(configData io.Reader) (*ConfigFile, error) {
configFile := ConfigFile{
AuthConfigs: make(map[string]AuthConfig),
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
configFile := configfile.ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
}
err := configFile.LoadFromReader(configData)
return &configFile, err
}
// Load reads the configuration files in the given directory, and sets up
// the auth config information and return values.
// the auth config information and returns values.
// FIXME: use the internal golang config parser
func Load(configDir string) (*ConfigFile, error) {
func Load(configDir string) (*configfile.ConfigFile, error) {
if configDir == "" {
configDir = ConfigDir()
}
configFile := ConfigFile{
AuthConfigs: make(map[string]AuthConfig),
filename: filepath.Join(configDir, ConfigFileName),
configFile := configfile.ConfigFile{
AuthConfigs: make(map[string]types.AuthConfig),
Filename: filepath.Join(configDir, ConfigFileName),
}
// Try happy path first - latest config file
if _, err := os.Stat(configFile.filename); err == nil {
file, err := os.Open(configFile.filename)
if _, err := os.Stat(configFile.Filename); err == nil {
file, err := os.Open(configFile.Filename)
if err != nil {
return &configFile, err
return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
}
defer file.Close()
err = configFile.LoadFromReader(file)
if err != nil {
err = fmt.Errorf("%s - %v", configFile.Filename, err)
}
return &configFile, err
} else if !os.IsNotExist(err) {
// if file is there but we can't stat it for any reason other
// than it doesn't exist then stop
return &configFile, err
return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err)
}
// Can't find latest config file so check for the old one
@ -187,87 +105,16 @@ func Load(configDir string) (*ConfigFile, error) {
}
file, err := os.Open(confFile)
if err != nil {
return &configFile, err
return &configFile, fmt.Errorf("%s - %v", confFile, err)
}
defer file.Close()
err = configFile.LegacyLoadFromReader(file)
return &configFile, err
}
// SaveToWriter encodes and writes out all the authorization information to
// the given writer
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
// Encode sensitive data into a new/temp struct
tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs))
for k, authConfig := range configFile.AuthConfigs {
authCopy := authConfig
// encode and save the authstring, while blanking out the original fields
authCopy.Auth = EncodeAuth(&authCopy)
authCopy.Username = ""
authCopy.Password = ""
authCopy.ServerAddress = ""
tmpAuthConfigs[k] = authCopy
}
saveAuthConfigs := configFile.AuthConfigs
configFile.AuthConfigs = tmpAuthConfigs
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
data, err := json.MarshalIndent(configFile, "", "\t")
if err != nil {
return err
}
_, err = writer.Write(data)
return err
}
// Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() error {
if configFile.Filename() == "" {
return fmt.Errorf("Can't save config with empty filename")
return &configFile, fmt.Errorf("%s - %v", confFile, err)
}
if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil {
return err
if configFile.HTTPHeaders == nil {
configFile.HTTPHeaders = map[string]string{}
}
f, err := os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
return configFile.SaveToWriter(f)
}
// Filename returns the name of the configuration file
func (configFile *ConfigFile) Filename() string {
return configFile.filename
}
// EncodeAuth creates a base64 encoded string containing authorization information
func EncodeAuth(authConfig *AuthConfig) string {
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// DecodeAuth decodes a base64 encoded string and returns username and password
func DecodeAuth(authStr string) (string, string, error) {
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return "", "", err
}
if n > decLen {
return "", "", fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.SplitN(string(decoded), ":", 2)
if len(arr) != 2 {
return "", "", fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return arr[0], password, nil
return &configFile, nil
}
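A hedged usage sketch of Load; an empty argument falls back to ConfigDir(), and log is assumed imported:

```
cfg, err := cliconfig.Load("") // "" -> default config directory
if err != nil {
	log.Fatalf("loading config: %v", err)
}
for registry := range cfg.AuthConfigs {
	log.Printf("stored credentials for %s", registry)
}
```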

View File

@ -0,0 +1,177 @@
package configfile
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/engine-api/types"
)
const (
// This constant is only used for really old config files when the
// URL wasn't saved as part of the config file and it was just
// assumed to be this value.
defaultIndexserver = "https://index.docker.io/v1/"
)
// ConfigFile ~/.docker/config.json file info
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
PsFormat string `json:"psFormat,omitempty"`
ImagesFormat string `json:"imagesFormat,omitempty"`
DetachKeys string `json:"detachKeys,omitempty"`
CredentialsStore string `json:"credsStore,omitempty"`
Filename string `json:"-"` // Note: for internal use only
}
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
// auth config information with given directory and populates the receiver object
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
b, err := ioutil.ReadAll(configData)
if err != nil {
return err
}
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
arr := strings.Split(string(b), "\n")
if len(arr) < 2 {
return fmt.Errorf("The Auth config file is empty")
}
authConfig := types.AuthConfig{}
origAuth := strings.Split(arr[0], " = ")
if len(origAuth) != 2 {
return fmt.Errorf("Invalid Auth config file")
}
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
if err != nil {
return err
}
authConfig.ServerAddress = defaultIndexserver
configFile.AuthConfigs[defaultIndexserver] = authConfig
} else {
for k, authConfig := range configFile.AuthConfigs {
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
if err != nil {
return err
}
authConfig.Auth = ""
authConfig.ServerAddress = k
configFile.AuthConfigs[k] = authConfig
}
}
return nil
}
// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
return err
}
var err error
for addr, ac := range configFile.AuthConfigs {
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
if err != nil {
return err
}
ac.Auth = ""
ac.ServerAddress = addr
configFile.AuthConfigs[addr] = ac
}
return nil
}
// ContainsAuth returns whether there is authentication configured
// in this file or not.
func (configFile *ConfigFile) ContainsAuth() bool {
return configFile.CredentialsStore != "" ||
(configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0)
}
// SaveToWriter encodes and writes out all the authorization information to
// the given writer
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
// Encode sensitive data into a new/temp struct
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
for k, authConfig := range configFile.AuthConfigs {
authCopy := authConfig
// encode and save the authstring, while blanking out the original fields
authCopy.Auth = encodeAuth(&authCopy)
authCopy.Username = ""
authCopy.Password = ""
authCopy.ServerAddress = ""
tmpAuthConfigs[k] = authCopy
}
saveAuthConfigs := configFile.AuthConfigs
configFile.AuthConfigs = tmpAuthConfigs
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
data, err := json.MarshalIndent(configFile, "", "\t")
if err != nil {
return err
}
_, err = writer.Write(data)
return err
}
// Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() error {
if configFile.Filename == "" {
return fmt.Errorf("Can't save config with empty filename")
}
if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil {
return err
}
f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
return configFile.SaveToWriter(f)
}
// encodeAuth creates a base64 encoded string containing authorization information
func encodeAuth(authConfig *types.AuthConfig) string {
if authConfig.Username == "" && authConfig.Password == "" {
return ""
}
authStr := authConfig.Username + ":" + authConfig.Password
msg := []byte(authStr)
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
base64.StdEncoding.Encode(encoded, msg)
return string(encoded)
}
// decodeAuth decodes a base64 encoded string and returns username and password
func decodeAuth(authStr string) (string, string, error) {
if authStr == "" {
return "", "", nil
}
decLen := base64.StdEncoding.DecodedLen(len(authStr))
decoded := make([]byte, decLen)
authByte := []byte(authStr)
n, err := base64.StdEncoding.Decode(decoded, authByte)
if err != nil {
return "", "", err
}
if n > decLen {
return "", "", fmt.Errorf("Something went wrong decoding auth config")
}
arr := strings.SplitN(string(decoded), ":", 2)
if len(arr) != 2 {
return "", "", fmt.Errorf("Invalid auth configuration file")
}
password := strings.Trim(arr[1], "\x00")
return arr[0], password, nil
}
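The two helpers above implement the classic base64("username:password") scheme; a standalone round-trip sketch using only the standard library:

```
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Encode as encodeAuth does.
	enc := base64.StdEncoding.EncodeToString([]byte("alice:s3cret"))
	fmt.Println(enc) // YWxpY2U6czNjcmV0

	// Decode as decodeAuth does, splitting on the first ':'.
	dec, _ := base64.StdEncoding.DecodeString(enc)
	parts := strings.SplitN(string(dec), ":", 2)
	fmt.Println(parts[0], parts[1]) // alice s3cret
}
```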

View File

@ -0,0 +1,67 @@
package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
}
// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If this is the first time we have seen this path, check whether it is
// already mounted on the system so the count starts from the correct
// value (an existing mount means the path is already in use).
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.mu.Unlock()
return m.count
}
// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(path string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// If this is the first time we have seen this path, check whether it is
// already mounted on the system so the count starts from the correct
// value (an existing mount means the path is already in use).
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.mu.Unlock()
return m.count
}
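An in-package sketch of the counter; nopChecker is illustrative and reports nothing as mounted, so only the calls themselves move the count:

```
type nopChecker struct{}

func (nopChecker) IsMounted(path string) bool { return false }

func exampleRefCount() {
	rc := NewRefCounter(nopChecker{})
	_ = rc.Increment("/var/lib/docker/overlay/abc") // count -> 1
	_ = rc.Increment("/var/lib/docker/overlay/abc") // count -> 2
	_ = rc.Decrement("/var/lib/docker/overlay/abc") // count -> 1
}
```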

View File

@ -0,0 +1,243 @@
package graphdriver
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
)
// FsMagic unsigned id of the filesystem in use.
type FsMagic uint32
const (
// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
FsMagicUnsupported = FsMagic(0x00000000)
)
var (
// All registered drivers
drivers map[string]InitFunc
// ErrNotSupported returned when driver is not supported.
ErrNotSupported = errors.New("driver not supported")
// ErrPrerequisites returned when driver does not meet prerequisites.
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
// ErrIncompatibleFS returned when file system is not supported.
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
// InitFunc initializes the storage driver.
type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code which choose not to implement the entire Driver
// interface and use the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container.
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "".
Create(id, parent, mountLabel string, storageOpt map[string]string) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g, unmounting layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// Returns a set of key-value pairs which give low level information
// about the image/container the driver is managing.
GetMetadata(id string) (map[string]string, error)
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (archive.Archive, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The archive.Reader must be an uncompressed stream.
ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
}
// DiffGetterDriver is the interface for layered file system drivers that
// provide a specialized function for getting file contents for tar-split.
type DiffGetterDriver interface {
Driver
// DiffGetter returns an interface to efficiently retrieve the contents
// of files in a layer.
DiffGetter(id string) (FileGetCloser, error)
}
// FileGetCloser extends the storage.FileGetter interface with a Close method
// for cleaning up.
type FileGetCloser interface {
storage.FileGetter
// Close cleans up any resources associated with the FileGetCloser.
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// Register registers an InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
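Concrete drivers call Register from an init func at import time; a hedged sketch in which the package name, driver name, and Init are all illustrative:

```
package mydriver

import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/idtools"
)

func init() {
	graphdriver.Register("mydriver", Init)
}

// Init matches graphdriver.InitFunc.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	return nil, graphdriver.ErrNotSupported // placeholder
}
```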
// GetDriver initializes and returns the registered driver
func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
if pluginDriver, err := lookupPlugin(name, home, options); err == nil {
return pluginDriver, nil
}
logrus.Errorf("Failed to GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
}
logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
return nil, ErrNotSupported
}
// New creates the driver and initializes it at the specified root.
func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
if name != "" {
logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
return GetDriver(name, root, options, uidMaps, gidMaps)
}
// Guess for prior driver
driversMap := scanPriorDrivers(root)
for _, name := range priority {
if name == "vfs" {
// don't use vfs even if there is state present.
continue
}
if _, prior := driversMap[name]; prior {
// of the state found from prior drivers, check in order of our priority
// which we would prefer
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
// unlike below, we will return error here, because there is prior
// state, and now it is no longer supported/prereq/compatible, so
// something changed and needs attention. Otherwise the daemon's
// images would just "disappear".
logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err)
return nil, err
}
// abort starting when there are other prior configured drivers
// to ensure the user explicitly selects the driver to load
if len(driversMap)-1 > 0 {
var driversSlice []string
for name := range driversMap {
driversSlice = append(driversSlice, name)
}
return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", root, strings.Join(driversSlice, ", "))
}
logrus.Infof("[graphdriver] using prior storage driver %q", name)
return driver, nil
}
}
// Check for priority drivers first
for _, name := range priority {
driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps)
if err != nil {
if isDriverNotSupported(err) {
continue
}
return nil, err
}
return driver, nil
}
return nil, fmt.Errorf("No supported storage backend found")
}
// isDriverNotSupported returns true if the error initializing
// the graph driver is a non-supported error.
func isDriverNotSupported(err error) bool {
return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
}
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
func scanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
for driver := range drivers {
p := filepath.Join(root, driver)
if _, err := os.Stat(p); err == nil && driver != "vfs" {
driversMap[driver] = true
}
}
return driversMap
}
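A hedged daemon-side sketch of New above: an empty name lets it probe prior on-disk state and the platform priority list, while a non-empty name forces that driver (log assumed imported):

```
driver, err := graphdriver.New("/var/lib/docker", "", nil, nil, nil)
if err != nil {
	log.Fatalf("no usable storage backend: %v", err)
}
log.Printf("selected graphdriver: %s", driver.String())
```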

View File

@ -0,0 +1,19 @@
package graphdriver
import "syscall"
var (
// Slice of drivers that should be used in order
priority = []string{
"zfs",
}
)
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}

View File

@ -0,0 +1,133 @@
// +build linux
package graphdriver
import (
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
const (
// FsMagicAufs filesystem id for Aufs
FsMagicAufs = FsMagic(0x61756673)
// FsMagicBtrfs filesystem id for Btrfs
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem id for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
FsMagicJfs = FsMagic(0x3153464a)
// FsMagicNfsFs filesystem id for NfsFs
FsMagicNfsFs = FsMagic(0x00006969)
// FsMagicRAMFs filesystem id for RamFs
FsMagicRAMFs = FsMagic(0x858458f6)
// FsMagicReiserFs filesystem id for ReiserFs
FsMagicReiserFs = FsMagic(0x52654973)
// FsMagicSmbFs filesystem id for SmbFs
FsMagicSmbFs = FsMagic(0x0000517B)
// FsMagicSquashFs filesystem id for SquashFs
FsMagicSquashFs = FsMagic(0x73717368)
// FsMagicTmpFs filesystem id for TmpFs
FsMagicTmpFs = FsMagic(0x01021994)
// FsMagicVxFS filesystem id for VxFs
FsMagicVxFS = FsMagic(0xa501fcf5)
// FsMagicXfs filesystem id for Xfs
FsMagicXfs = FsMagic(0x58465342)
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
// FsMagicOverlay filesystem id for overlay
FsMagicOverlay = FsMagic(0x794C7630)
)
var (
// Slice of drivers that should be used in order
priority = []string{
"aufs",
"btrfs",
"zfs",
"devicemapper",
"overlay",
"vfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicRAMFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicVxFS: "vxfs",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil
}
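A short sketch of the two checkers above; the paths are hypothetical:

```
// statfs-based check, tied to one filesystem magic:
fsc := graphdriver.NewFsChecker(graphdriver.FsMagicBtrfs)
if fsc.IsMounted("/var/lib/docker/btrfs/subvolumes/abc") {
	// already mounted; a RefCounter starts such a path at count 1
}

// /proc/mountinfo-based check, filesystem-agnostic:
def := graphdriver.NewDefaultChecker()
_ = def.IsMounted("/var/lib/docker/overlay/abc/merged")
```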

View File

@ -0,0 +1,65 @@
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
log "github.com/Sirupsen/logrus"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in order
priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
// Mounted checks if the given path is mounted as the fs type.
// Solaris supports only ZFS for now.
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
// free the C allocations on every return path
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
return false, ErrPrerequisites
}
return true, nil
}

View File

@ -0,0 +1,15 @@
// +build !linux,!windows,!freebsd,!solaris
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"unsupported",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagicUnsupported, nil
}

View File

@ -0,0 +1,14 @@
package graphdriver
var (
// Slice of drivers that should be used in order
priority = []string{
"windowsfilter",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
// Note it is OK to return FsMagicUnsupported on Windows.
return FsMagicUnsupported, nil
}

View File

@ -0,0 +1,162 @@
package graphdriver
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
)
var (
// ApplyUncompressedLayer defines the unpack method used by the graph
// driver.
ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
)
// NaiveDiffDriver takes a ProtoDriver and adds the
// capability of the Diffing methods which it may or may not
// support on its own. See the comment on the exported
// NewNaiveDiffDriver function below.
// Notably, the AUFS driver doesn't need to be wrapped like this.
type NaiveDiffDriver struct {
ProtoDriver
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
}
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
// Diff(id, parent string) (archive.Archive, error)
// Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
return &NaiveDiffDriver{ProtoDriver: driver,
uidMaps: uidMaps,
gidMaps: gidMaps}
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil {
return nil, err
}
defer func() {
if err != nil {
driver.Put(id)
}
}()
if parent == "" {
archive, err := archive.Tar(layerFs, archive.Uncompressed)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driver.Put(id)
return err
}), nil
}
parentFs, err := driver.Get(parent, "")
if err != nil {
return nil, err
}
defer driver.Put(parent)
changes, err := archive.ChangesDirs(layerFs, parentFs)
if err != nil {
return nil, err
}
archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
driver.Put(id)
return err
}), nil
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
if err != nil {
return nil, err
}
defer driver.Put(id)
parentFs := ""
if parent != "" {
parentFs, err = driver.Get(parent, "")
if err != nil {
return nil, err
}
defer driver.Put(parent)
}
return archive.ChangesDirs(layerFs, parentFs)
}
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) {
driver := gdw.ProtoDriver
// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := driver.Get(id, "")
if err != nil {
return
}
defer driver.Put(id)
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
return
}
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
driver := gdw.ProtoDriver
changes, err := gdw.Changes(id, parent)
if err != nil {
return
}
layerFs, err := driver.Get(id, "")
if err != nil {
return
}
defer driver.Put(id)
return archive.ChangesSize(layerFs, changes), nil
}
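Sketch of the wrapper's intent: a ProtoDriver without native diff support is promoted to a full Driver (newSimpleProtoDriver is hypothetical, log assumed imported):

```
var proto graphdriver.ProtoDriver = newSimpleProtoDriver("/var/lib/docker/simple")
full := graphdriver.NewNaiveDiffDriver(proto, nil, nil)

// The wrapped driver now answers Diff/Changes/ApplyDiff/DiffSize.
size, err := full.DiffSize("layer-id", "parent-id")
if err == nil {
	log.Printf("layer delta: %d bytes", size)
}
```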

View File

@ -0,0 +1,32 @@
// +build experimental
package graphdriver
import (
"fmt"
"io"
"github.com/docker/docker/pkg/plugins"
)
type pluginClient interface {
// Call calls the specified method with the specified arguments for the plugin.
Call(string, interface{}, interface{}) error
// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
Stream(string, interface{}) (io.ReadCloser, error)
// SendFile calls the specified method, and passes through the IO stream
SendFile(string, io.Reader, interface{}) error
}
func lookupPlugin(name, home string, opts []string) (Driver, error) {
pl, err := plugins.Get(name, "GraphDriver")
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
proxy := &graphDriverProxy{name, c}
return proxy, proxy.Init(home, opts)
}

View File

@ -0,0 +1,7 @@
// +build !experimental
package graphdriver
func lookupPlugin(name, home string, opts []string) (Driver, error) {
return nil, ErrNotSupported
}

View File

@ -0,0 +1,225 @@
// +build experimental
package graphdriver
import (
"errors"
"fmt"
"github.com/docker/docker/pkg/archive"
)
type graphDriverProxy struct {
name string
client pluginClient
}
type graphDriverRequest struct {
ID string `json:",omitempty"`
Parent string `json:",omitempty"`
MountLabel string `json:",omitempty"`
}
type graphDriverResponse struct {
Err string `json:",omitempty"`
Dir string `json:",omitempty"`
Exists bool `json:",omitempty"`
Status [][2]string `json:",omitempty"`
Changes []archive.Change `json:",omitempty"`
Size int64 `json:",omitempty"`
Metadata map[string]string `json:",omitempty"`
}
type graphDriverInitRequest struct {
Home string
Opts []string
}
func (d *graphDriverProxy) Init(home string, opts []string) error {
args := &graphDriverInitRequest{
Home: home,
Opts: opts,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Remove(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
args := &graphDriverRequest{
ID: id,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil {
return "", err
}
var err error
if ret.Err != "" {
err = errors.New(ret.Err)
}
return ret.Dir, err
}
func (d *graphDriverProxy) Put(id string) error {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Exists(id string) bool {
args := &graphDriverRequest{ID: id}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil {
return false
}
return ret.Exists
}
func (d *graphDriverProxy) Status() [][2]string {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil {
return nil
}
return ret.Status
}
func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
args := &graphDriverRequest{
ID: id,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Metadata, nil
}
func (d *graphDriverProxy) Cleanup() error {
args := &graphDriverRequest{}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil {
return nil
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
body, err := d.client.Stream("GraphDriver.Diff", args)
if err != nil {
return nil, err
}
return archive.Archive(body), nil
}
func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil {
return nil, err
}
if ret.Err != "" {
return nil, errors.New(ret.Err)
}
return ret.Changes, nil
}
func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
var ret graphDriverResponse
if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
args := &graphDriverRequest{
ID: id,
Parent: parent,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil {
return -1, err
}
if ret.Err != "" {
return -1, errors.New(ret.Err)
}
return ret.Size, nil
}
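For reference, an in-package sketch of the JSON bodies this proxy exchanges; field names come from the structs above, values are illustrative, and encoding/json plus fmt are assumed imported:

```
func exampleWire() {
	req := graphDriverRequest{ID: "abc", Parent: "def"}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {"ID":"abc","Parent":"def"} - omitempty drops MountLabel

	var ret graphDriverResponse
	_ = json.Unmarshal([]byte(`{"Dir":"/var/lib/mydriver/abc"}`), &ret)
	fmt.Println(ret.Dir) // /var/lib/mydriver/abc
}
```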

View File

@ -1,16 +0,0 @@
// +build !autogen
// Package dockerversion is auto-generated at build-time
package dockerversion
// Default build-time variables for library-import.
// This file is overridden on build with build-time information.
const (
GitCommit string = "library-import"
Version string = "library-import"
BuildTime string = "library-import"
IAmStatic string = "library-import"
InitSHA1 string = "library-import"
InitPath string = "library-import"
)

View File

@ -1,58 +0,0 @@
Docker 'errors' package
=======================
This package contains all of the error messages generated by the Docker
engine that might be exposed via the Docker engine's REST API.
Each top-level engine package will have its own file in this directory
so that there's a clear grouping of errors, instead of just one big
file. The errors for each package are defined here instead of within
their respective package structure so that Docker CLI code that may need
to import these error definition files will not need to know or understand
the engine's package/directory structure. In other words, all they should
need to do is import `.../docker/errors` and they will automatically
pick up all Docker engine defined errors. This also gives the engine
developers the freedom to change the engine packaging structure (e.g. to
CRUD packages) without worrying about breaking existing clients.
These errors are defined using the 'errcode' package. The `errcode` package
allows for each error to be typed and include all information necessary to
have further processing done on them if necessary. In particular, each error
includes:
* Value - a unique string (in all caps) associated with this error.
Typically, this string is the same name as the variable name of the error
(w/o the `ErrorCode` text) but in all caps.
* Message - the human readable sentence that will be displayed for this
error. It can contain '%s' substitutions that allows for the code generating
the error to specify values that will be inserted in the string prior to
being displayed to the end-user. The `WithArgs()` function can be used to
specify the insertion strings. Note, the evaluation of the strings will be
done at the time `WithArgs()` is called.
* Description - additional human readable text to further explain the
circumstances of the error situation.
* HTTPStatusCode - when the error is returned back to a CLI, this value
will be used to populate the HTTP status code. If not present the default
value will be `StatusInternalServerError`, 500.
Not all errors generated within the engine's executable will be propagated
back to the engine's API layer. For example, it is expected that errors
generated by vendored code (under `docker/vendor`) and packaged code
(under `docker/pkg`) will be converted into errors defined by this package.
When processing an errcode error, if you are looking for a particular
error then you can do something like:
```
import derr "github.com/docker/docker/errors"
...
err := someFunc()
if err.ErrorCode() == derr.ErrorCodeNoSuchContainer {
...
}
```

View File

@ -1,93 +0,0 @@
package errors
// This file contains all of the errors that can be generated from the
// docker/builder component.
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
var (
// ErrorCodeAtLeastOneArg is generated when the parser comes across a
// Dockerfile command that doesn't have any args.
ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "ATLEASTONEARG",
Message: "%s requires at least one argument",
Description: "The specified command requires at least one argument",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExactlyOneArg is generated when the parser comes across a
// Dockerfile command that requires exactly one arg but got less/more.
ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXACTLYONEARG",
Message: "%s requires exactly one argument",
Description: "The specified command requires exactly one argument",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeAtLeastTwoArgs is generated when the parser comes across a
// Dockerfile command that requires at least two args but got less.
ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "ATLEASTTWOARGS",
Message: "%s requires at least two arguments",
Description: "The specified command requires at least two arguments",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeTooManyArgs is generated when the parser comes across a
// Dockerfile command that has more args than it should
ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "TOOMANYARGS",
Message: "Bad input to %s, too many args",
Description: "The specified command was passed too many arguments",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeChainOnBuild is generated when the parser comes across a
// Dockerfile command that is trying to chain ONBUILD commands.
ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CHAINONBUILD",
Message: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
Description: "ONBUILD Dockerfile commands aren't allow on ONBUILD commands",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeBadOnBuildCmd is generated when the parser comes across
// an ONBUILD Dockerfile command with an invalid trigger/command.
ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BADONBUILDCMD",
Message: "%s isn't allowed as an ONBUILD trigger",
Description: "The specified ONBUILD command isn't allowed",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeMissingFrom is generated when the Dockerfile is missing
// a FROM command.
ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MISSINGFROM",
Message: "Please provide a source image with `from` prior to run",
Description: "The Dockerfile is missing a FROM command",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotOnWindows is generated when the specified Dockerfile
// command is not supported on Windows.
ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTONWINDOWS",
Message: "%s is not supported on Windows",
Description: "The specified Dockerfile command is not supported on Windows",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeEmpty is generated when the specified Volume string
// is empty.
ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEEMPTY",
Message: "Volume specified can not be an empty string",
Description: "The specified volume can not be an empty string",
HTTPStatusCode: http.StatusInternalServerError,
})
)

View File

@ -1,951 +0,0 @@
package errors
// This file contains all of the errors that can be generated from the
// docker/daemon component.
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
var (
// ErrorCodeNoSuchContainer is generated when we look for a container by
// name or ID and we can't find it.
ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOSUCHCONTAINER",
Message: "no such id: %s",
Description: "The specified container can not be found",
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeUnregisteredContainer is generated when we try to load
// a storage driver for an unregistered container
ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "UNREGISTEREDCONTAINER",
Message: "Can't load storage driver for unregistered container %s",
Description: "An attempt was made to load the storage driver for a container that is not registered with the daemon",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeContainerBeingRemoved is generated when an attempt to start
// a container is made but it's in the process of being removed, or is dead.
ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CONTAINERBEINGREMOVED",
Message: "Container is marked for removal and cannot be started.",
Description: "An attempt was made to start a container that is in the process of being deleted",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeUnpauseContainer is generated when we attempt to stop a
// container but it's paused.
ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "UNPAUSECONTAINER",
Message: "Container %s is paused. Unpause the container before stopping",
Description: "The specified container is paused, before it can be stopped it must be unpaused",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodePausedContainer is generated when we attempt to attach a
// container but it's paused.
ErrorCodePausedContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CONTAINERPAUSED",
Message: "Container %s is paused. Unpause the container before attach",
Description: "The specified container is paused, unpause the container before attach",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeAlreadyPaused is generated when we attempt to pause a
// container when it's already paused.
ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "ALREADYPAUSED",
Message: "Container %s is already paused",
Description: "The specified container is already in the paused state",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotPaused is generated when we attempt to unpause a
// container when it's not paused.
ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTPAUSED",
Message: "Container %s is not paused",
Description: "The specified container can not be unpaused because it is not in a paused state",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeImageUnregContainer is generated when we attempt to get the
// image of an unknown/unregistered container.
ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "IMAGEUNREGCONTAINER",
Message: "Can't get image of unregistered container",
Description: "An attempt to retrieve the image of a container was made but the container is not registered",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeEmptyID is generated when an ID is the empty string.
ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYID",
Message: "Invalid empty id",
Description: "An attempt was made to register a container but the container's ID can not be an empty string",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeLoggingFactory is generated when we could not load the
// log driver.
ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "LOGGINGFACTORY",
Message: "Failed to get logging factory: %v",
Description: "An attempt was made to register a container but the container's ID can not be an empty string",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeInitLogger is generated when we could not initialize
// the logging driver.
ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "INITLOGGER",
Message: "Failed to initialize logging driver: %v",
Description: "An error occurred while trying to initialize the logging driver",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotRunning is generated when we need to verify that
// a container is running, but it's not.
ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTRUNNING",
Message: "Container %s is not running",
Description: "The specified action can not be taken due to the container not being in a running state",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeLinkNotRunning is generated when we try to link to a
// container that is not running.
ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "LINKNOTRUNNING",
Message: "Cannot link to a non running container: %s AS %s",
Description: "An attempt was made to link to a container but the container is not in a running state",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeDeviceInfo is generated when there is an error while trying
// to get info about a custom device.
ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DEVICEINFO",
Message: "error gathering device information while adding custom device %q: %s",
Description: "There was an error while trying to retrieve the information about a custom device",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeEmptyEndpoint is generated when the endpoint for a port
// map is nil.
ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYENDPOINT",
Message: "invalid endpoint while building port map info",
Description: "The specified endpoint for the port mapping is empty",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeEmptyNetwork is generated when the networkSettings for a port
// map is nil.
ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYNETWORK",
Message: "invalid networksettings while building port map info",
Description: "The specified endpoint for the port mapping is empty",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeParsingPort is generated when there is an error parsing
// a "port" string.
ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "PARSINGPORT",
Message: "Error parsing Port value(%v):%v",
Description: "There was an error while trying to parse the specified 'port' value",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNoSandbox is generated when we can't find the specified
// sandbox(network) by ID.
ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOSANDBOX",
Message: "error locating sandbox id %s: %v",
Description: "There was an error trying to located the specified networking sandbox",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNetworkUpdate is generated when there is an error while
// trying update a network/sandbox config.
ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NETWORKUPDATE",
Message: "Update network failed: %v",
Description: "There was an error trying to update the configuration information of the specified network sandbox",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNetworkRefresh is generated when there is an error while
// trying refresh a network/sandbox config.
ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NETWORKREFRESH",
Message: "Update network failed: Failure in refresh sandbox %s: %v",
Description: "There was an error trying to refresh the configuration information of the specified network sandbox",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeHostPort is generated when there was an error while trying
// to parse a "host/port" string.
ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "HOSTPORT",
Message: "Error parsing HostPort value(%s):%v",
Description: "There was an error trying to parse the specified 'HostPort' value",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNetworkConflict is generated when we try to publish a service
// in network mode.
ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NETWORKCONFLICT",
Message: "conflicting options: publishing a service and network mode",
Description: "It is not possible to publish a service when it is in network mode",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeJoinInfo is generated when we failed to update a container's
// join info.
ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "JOININFO",
Message: "Updating join info failed: %v",
Description: "There was an error during an attempt update a container's join information",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeIPCRunning is generated when we try to join a container's
// IPC but it's not running.
ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "IPCRUNNING",
Message: "cannot join IPC of a non running container: %s",
Description: "An attempt was made to join the IPC of a container, but the container is not running",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotADir is generated when we try to create a directory
// but the path isn't a dir.
ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTADIR",
Message: "Cannot mkdir: %s is not a directory",
Description: "An attempt was made create a directory, but the location in which it is being created is not a directory",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeParseContainer is generated when the reference to a
// container doesn't include a ":" (another container).
ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "PARSECONTAINER",
Message: "no container specified to join network",
Description: "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeJoinSelf is generated when we try to network to ourselves.
ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "JOINSELF",
Message: "cannot join own network",
Description: "An attempt was made to have a container join its own network",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeJoinRunning is generated when we try to join the network of a non-running container.
ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "JOINRUNNING",
Message: "cannot join network of a non running container: %s",
Description: "An attempt to join the network of a container, but that container isn't running",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeModeNotContainer is generated when we try to network to
// another container but the mode isn't 'container'.
ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MODENOTCONTAINER",
Message: "network mode not set to container",
Description: "An attempt was made to connect to a container's network but the mode wasn't set to 'container'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRemovingVolume is generated when we try to remove a mount
// point (volume) but fail.
ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "REMOVINGVOLUME",
Message: "Error removing volumes:\n%v",
Description: "There was an error while trying to remove the mount point (volume) of a container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeInvalidNetworkMode is generated when an invalid network
// mode value is specified.
ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "INVALIDNETWORKMODE",
Message: "invalid network mode: %s",
Description: "The specified networking mode is not valid",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeGetGraph is generated when there was an error while
// trying to find a graph/image.
ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "GETGRAPH",
Message: "Failed to graph.Get on ImageID %s - %s",
Description: "There was an error trying to retrieve the image for the specified image ID",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeGetLayer is generated when there was an error while
// trying to retrieve a particular layer of an image.
ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "GETLAYER",
Message: "Failed to get layer path from graphdriver %s for ImageID %s - %s",
Description: "There was an error trying to retrieve the layer of the specified image",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodePutLayer is generated when there was an error while
// trying to 'put' a particular layer of an image.
ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "PUTLAYER",
Message: "Failed to put layer path from graphdriver %s for ImageID %s - %s",
Description: "There was an error trying to store a layer for the specified image",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeGetLayerMetadata is generated when there was an error while
// trying to retrieve the metadata of a layer of an image.
ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "GETLAYERMETADATA",
Message: "Failed to get layer metadata - %s",
Description: "There was an error trying to retrieve the metadata of a layer for the specified image",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeEmptyConfig is generated when the input config data
// is empty.
ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYCONFIG",
Message: "Config cannot be empty in order to create a container",
Description: "While trying to create a container, the specified configuration information was empty",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNoSuchImageHash is generated when we can't find the
// specified image by its hash
ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOSUCHIMAGEHASH",
Message: "No such image: %s",
Description: "An attempt was made to find an image by its hash, but the lookup failed",
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeNoSuchImageTag is generated when we can't find the
// specified image by its name/tag.
ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOSUCHIMAGETAG",
Message: "No such image: %s:%s",
Description: "An attempt was made to find an image by its name/tag, but the lookup failed",
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeMountOverFile is generated when we try to mount a volume
// over an existing file (but not a dir).
ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MOUNTOVERFILE",
Message: "cannot mount volume over existing file, file exists %s",
Description: "An attempt was made to mount a volume at the same location as a pre-existing file",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeMountSetup is generated when we can't define a mount point
// due to the source and destination being undefined.
ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "MOUNTSETUP",
Message: "Unable to setup mount point, neither source nor volume defined",
Description: "An attempt was made to setup a mount point, but the source and destination are undefined",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind
// mount is invalid.
ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEINVALIDMODE",
Message: "invalid mode: %q",
Description: "An invalid 'mode' was specified",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeInvalid is generated when the format of the
// volume specification isn't valid.
ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEINVALID",
Message: "Invalid volume specification: '%s'",
Description: "An invalid 'volume' was specified in the mount request",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeAbs is generated when the path to a volume isn't absolute.
ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEABS",
Message: "Invalid volume destination path: '%s' mount path must be absolute.",
Description: "An invalid 'destination' path was specified in the mount request, it must be an absolute path",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeName is generated when the name of a named volume isn't valid.
ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUME_NAME_INVALID",
Message: "%q includes invalid characters for a local volume name, only %q are allowed",
Description: "The name of volume is invalid",
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeVolumeSlash is generated when the destination path of a volume is '/'
ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMESLASH",
Message: "Invalid specification: destination can't be '/' in '%s'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeDestIsC is generated when the destination is c: (Windows specific)
ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEDESTISC",
Message: "Destination drive letter in '%s' cannot be c:",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeDestIsCRoot is generated when the destination path is c:\ (Windows specific)
ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEDESTISCROOT",
Message: `Destination path in '%s' cannot be c:\`,
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeSourceNotFound is generated when the source directory could not be found (Windows specific)
ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMESOURCENOTFOUND",
Message: "Source directory '%s' could not be found: %s",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeSourceNotDirectory is generated when the source is not a directory (Windows specific)
ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMESOURCENOTDIRECTORY",
Message: "Source '%s' is not a directory",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeFromBlank is generated when path to a volume is blank.
ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEFROMBLANK",
Message: "malformed volumes-from specification: %q",
Description: "An invalid 'destination' path was specified in the mount request, it must not be blank",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeDup is generated when we try to mount two volumes
// to the same path.
ErrorCodeVolumeDup = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMEDUP",
Message: "Duplicate bind mount '%s'",
Description: "An attempt was made to mount a volume but the specified destination location is already used in a previous mount",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeNoSourceForMount is generated when no source directory
// for a volume mount was found. (Windows specific)
ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMENOSOURCEFORMOUNT",
Message: "No source for mount name '%s' driver %q destination '%s'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeVolumeNameReservedWord is generated when the name in a volume
// uses a reserved word for filenames. (Windows specific)
ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUMENAMERESERVEDWORD",
Message: "Volume name %q cannot be a reserved word for Windows filenames",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCantUnpause is generated when there's an error while trying
// to unpause a container.
ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CANTUNPAUSE",
Message: "Cannot unpause container %s: %s",
Description: "An error occurred while trying to unpause the specified container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodePSError is generated when there's an error running 'ps'.
ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "PSError",
Message: "Error running ps: %s",
Description: "There was an error trying to run the 'ps' command in the specified container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNoPID is generated when the PID field can't be found in the
// ps output.
ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOPID",
Message: "Couldn't find PID field in ps output",
Description: "There was no 'PID' field in the output of the 'ps' command that was executed",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeBadPID is generated when we can't convert a PID to an int.
ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BADPID",
Message: "Unexpected pid '%s': %s",
Description: "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNoTop is generated when we try to run 'top' but can't
// because we're on Windows.
ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTOP",
Message: "Top is not supported on Windows",
Description: "The 'top' command is not supported on Windows",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeStopped is generated when we try to stop a container
// that is already stopped.
ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "STOPPED",
Message: "Container already stopped",
Description: "An attempt was made to stop a container, but the container is already stopped",
HTTPStatusCode: http.StatusNotModified,
})
// ErrorCodeCantStop is generated when we try to stop a container
// but failed for some reason.
ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CANTSTOP",
Message: "Cannot stop container %s: %s\n",
Description: "An error occurred while tring to stop the specified container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeBadCPUFields is generated when the number of CPU fields is
// less than 8.
ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BADCPUFIELDS",
Message: "invalid number of cpu fields",
Description: "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeBadCPUInt is generated when the CPU field can't be parsed as an int.
ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BADCPUINT",
Message: "Unable to convert value %s to int: %s",
Description: "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeBadStatFormat is generated when the output of the stat info
// isn't parseable.
ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "BADSTATFORMAT",
Message: "invalid stat format",
Description: "There was an error trying to parse the '/proc/stat' file",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeTimedOut is generated when a timer expires.
ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "TIMEDOUT",
Message: "Timed out: %v",
Description: "A timer expired",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeAlreadyRemoving is generated when we try to remove a
// container that is already being removed.
ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "ALREADYREMOVING",
Message: "Status is already RemovalInProgress",
Description: "An attempt to remove a container was made, but the container is already in the process of being removed",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeStartPaused is generated when we start a paused container.
ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "STARTPAUSED",
Message: "Cannot start a paused container, try unpause instead.",
Description: "An attempt to start a container was made, but the container is paused. Unpause it first",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeAlreadyStarted is generated when we try to start a container
// that is already running.
ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "ALREADYSTARTED",
Message: "Container already started",
Description: "An attempt to start a container was made, but the container is already started",
HTTPStatusCode: http.StatusNotModified,
})
// ErrorCodeHostConfigStart is generated when a HostConfig is passed
// into the start command.
ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "HOSTCONFIGSTART",
Message: "Supplying a hostconfig on start is not supported. It should be supplied on create",
Description: "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCantRestart is generated when an error occurred while
// trying to restart a container.
ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CANTRESTART",
Message: "Cannot restart container %s: %s",
Description: "There was an error while trying to restart a container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeEmptyRename is generated when one of the names on a
// rename is empty.
ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EMPTYRENAME",
Message: "Neither old nor new names may be empty",
Description: "An attempt was made to rename a container but either the old or new names were blank",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRenameTaken is generated when we try to rename but the
// new name isn't available.
ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RENAMETAKEN",
Message: "Error when allocating new name: %s",
Description: "The new name specified on the 'rename' command is already being used",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRenameDelete is generated when we try to rename but
// failed trying to delete the old container.
ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RENAMEDELETE",
Message: "Failed to delete container %q: %v",
Description: "There was an error trying to delete the specified container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodePauseError is generated when we try to pause a container
// but failed.
ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "PAUSEERROR",
Message: "Cannot pause container %s: %s",
Description: "There was an error trying to pause the specified container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNeedStream is generated when we try to stream a container's
// logs but no output stream was specified.
ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NEEDSTREAM",
Message: "You must choose at least one stream",
Description: "While trying to stream a container's logs, no output stream was specified",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeDanglingOne is generated when we try to specify more than one
// 'dangling' specifier.
ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DANLGINGONE",
Message: "Conflict: cannot use more than 1 value for `dangling` filter",
Description: "The specified 'dangling' filter may not have more than one value",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeImgDelUsed is generated when we try to delete an image
// but it is being used.
ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "IMGDELUSED",
Message: "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s",
Description: "An attempt was made to delete an image but it is currently being used",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeImgNoParent is generated when we try to find an image's
// parent but it's not in the graph.
ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "IMGNOPARENT",
Message: "unable to get parent image: %v",
Description: "There was an error trying to find an image's parent, it was not in the graph",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExportFailed is generated when an export fails.
ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXPORTFAILED",
Message: "%s: %s",
Description: "There was an error during an export operation",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExecResize is generated when we try to resize an exec
// but it's not running.
ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECRESIZE",
Message: "Exec %s is not running, so it can not be resized.",
Description: "An attempt was made to resize an 'exec', but the 'exec' is not running",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeContainerNotRunning is generated when we try to get the info
// on an exec but the container is not running.
ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CONTAINERNOTRUNNING",
Message: "Container %s is not running: %s",
Description: "An attempt was made to retrieve the information about an 'exec' but the container is not running",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeNoExecID is generated when we try to get the info
// on an exec but it can't be found.
ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOEXECID",
Message: "No such exec instance '%s' found in daemon",
Description: "The specified 'exec' instance could not be found",
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeExecPaused is generated when we try to start an exec
// but the container is paused.
ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECPAUSED",
Message: "Container %s is paused, unpause the container before exec",
Description: "An attempt to start an 'exec' was made, but the owning container is paused",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeExecRunning is generated when we try to start an exec
// but it's already running.
ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECRUNNING",
Message: "Error: Exec command %s is already running",
Description: "An attempt to start an 'exec' was made, but 'exec' is already running",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExecCantRun is generated when we try to start an exec
// but it failed for some reason.
ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECCANTRUN",
Message: "Cannot run exec command %s in container %s: %s",
Description: "An attempt to start an 'exec' was made, but an error occurred",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExecAttach is generated when we try to attach to an exec
// but failed.
ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECATTACH",
Message: "attach failed with error: %s",
Description: "There was an error while trying to attach to an 'exec'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeExecContainerStopped is generated when we try to start
// an exec but then the container stopped.
ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "EXECCONTAINERSTOPPED",
Message: "container stopped while running exec",
Description: "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeDefaultName is generated when we try to delete the
// default name of a container.
ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "DEFAULTNAME",
Message: "Conflict, cannot remove the default name of the container",
Description: "An attempt to delete the default name of a container was made, but that is not allowed",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeNoParent is generated when we try to delete a container
// but we can't find its parent image.
ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOPARENT",
Message: "Cannot get parent %s for name %s",
Description: "An attempt was made to delete a container but its parent image could not be found",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCantDestroy is generated when we try to delete a container
// but failed for some reason.
ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CANTDESTROY",
Message: "Cannot destroy container %s: %v",
Description: "An attempt was made to delete a container but it failed",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmRunning is generated when we try to delete a container
// but it's still running.
ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMRUNNING",
Message: "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f",
Description: "An attempt was made to delete a container but the container is still running, try to either stop it first or use '-f'",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeRmFailed is generated when we try to delete a container
// but it failed for some reason.
ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMFAILED",
Message: "Could not kill running container, cannot remove - %v",
Description: "An error occurred while trying to delete a running container",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmNotFound is generated when we try to delete a container
// but couldn't find it.
ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMNOTFOUND",
Message: "Could not kill running container, cannot remove - %v",
Description: "An attempt to delete a container was made but the container could not be found",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmState is generated when we try to delete a container
// but couldn't set its state to RemovalInProgress.
ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMSTATE",
Message: "Failed to set container state to RemovalInProgress: %s",
Description: "An attempt to delete a container was made, but there as an error trying to set its state to 'RemovalInProgress'",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmDriverFS is generated when we try to delete a container
// but the driver failed to delete its filesystem.
ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMDRIVERFS",
Message: "Driver %s failed to remove root filesystem %s: %s",
Description: "While trying to delete a container, the driver failed to remove the root filesystem",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmInit is generated when we try to delete a container
// but failed deleting its init filesystem.
ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMINIT",
Message: "Driver %s failed to remove init filesystem %s: %s",
Description: "While trying to delete a container, the driver failed to remove the init filesystem",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmFS is generated when we try to delete a container
// but failed deleting its filesystem.
ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMFS",
Message: "Unable to remove filesystem for %v: %v",
Description: "While trying to delete a container, the driver failed to remove the filesystem",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmExecDriver is generated when we try to delete a container
// but failed deleting its exec driver data.
ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMEXECDRIVER",
Message: "Unable to remove execdriver data for %s: %s",
Description: "While trying to delete a container, there was an error trying to remove th exec driver data",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeRmVolumeInUse is generated when we try to delete a container
// but failed deleting a volume because it's being used.
ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMVOLUMEINUSE",
Message: "Conflict: %v",
Description: "While trying to delete a container, one of its volumes is still being used",
HTTPStatusCode: http.StatusConflict,
})
// ErrorCodeRmVolume is generated when we try to delete a container
// but failed deleting a volume.
ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "RMVOLUME",
Message: "Error while removing volume %s: %v",
Description: "While trying to delete a container, there was an error trying to delete one of its volumes",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeInvalidCpusetCpus is generated when user provided cpuset CPUs
// are invalid.
ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "INVALIDCPUSETCPUS",
Message: "Invalid value %s for cpuset cpus.",
Description: "While verifying the container's 'HostConfig', CpusetCpus value was in an incorrect format",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeInvalidCpusetMems is generated when user provided cpuset mems
// are invalid.
ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "INVALIDCPUSETMEMS",
Message: "Invalid value %s for cpuset mems.",
Description: "While verifying the container's 'HostConfig', CpusetMems value was in an incorrect format",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotAvailableCpusetCpus is generated when user provided cpuset
// CPUs aren't available in the container's cgroup.
ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTAVAILABLECPUSETCPUS",
Message: "Requested CPUs are not available - requested %s, available: %s.",
Description: "While verifying the container's 'HostConfig', cpuset CPUs provided aren't available in the container's cgroup available set",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeNotAvailableCpusetMems is generated when user provided cpuset
// memory nodes aren't available in the container's cgroup.
ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NOTAVAILABLECPUSETMEMS",
Message: "Requested memory nodes are not available - requested %s, available: %s.",
Description: "While verifying the container's 'HostConfig', cpuset memory nodes provided aren't available in the container's cgroup available set",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorVolumeNameTaken is generated when an error occurred while
// trying to create a volume that already exists under a different driver.
ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "VOLUME_NAME_TAKEN",
Message: "A volume name %s already exists with the %s driver. Choose a different volume name.",
Description: "An attempt to create a volume using a driver but the volume already exists with a different driver",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCmdNotFound is generated when a container's command can't start
// because the command was not found (exit code 127).
ErrorCodeCmdNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CMDNOTFOUND",
Message: "Container command not found or does not exist.",
Description: "Command could not be found, command does not exist",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCmdCouldNotBeInvoked is generated when a container's command
// can't be invoked, typically a permission-denied error (exit code 126).
ErrorCodeCmdCouldNotBeInvoked = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CMDCOULDNOTBEINVOKED",
Message: "Container command could not be invoked.",
Description: "Permission denied, cannot invoke command",
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeCantStart is generated when a container's command can't start
// for any reason other than the two errors above.
ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "CANTSTART",
Message: "Cannot start container %s: %s",
Description: "There was an error while trying to start a container",
HTTPStatusCode: http.StatusInternalServerError,
})
)


@ -1,6 +0,0 @@
package errors
// This file contains all of the errors that can be generated from the
// docker engine but are not tied to any specific top-level component.
const errGroup = "engine"
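All of the codes above follow the same pattern: a descriptor is registered once under the "engine" group, and call sites fill the Message placeholders with WithArgs (as derr.ErrorCodeInvalidImageID.WithArgs(id) does later in this diff). A minimal sketch of that pattern, using a hypothetical "demo" group and DEMOFAIL code rather than any of the vendored descriptors:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// errDemoFail is a hypothetical descriptor, not part of the vendored set.
var errDemoFail = errcode.Register("demo", errcode.ErrorDescriptor{
	Value:          "DEMOFAIL",
	Message:        "demo operation failed: %s",
	Description:    "Illustrative descriptor for the registration pattern",
	HTTPStatusCode: http.StatusInternalServerError,
})

func main() {
	// WithArgs substitutes into the Message format string, mirroring
	// how callers use the engine error codes registered above.
	err := errDemoFail.WithArgs("disk full")
	fmt.Println(err)
}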


@ -1,20 +0,0 @@
package errors
// This file contains all of the errors that can be generated from the
// docker/image component.
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
var (
// ErrorCodeInvalidImageID is generated when image id specified is incorrectly formatted.
ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "INVALIDIMAGEID",
Message: "image ID '%s' is invalid ",
Description: "The specified image id is incorrectly formatted",
HTTPStatusCode: http.StatusInternalServerError,
})
)


@ -1,36 +0,0 @@
package errors
import (
"net/http"
"github.com/docker/distribution/registry/api/errcode"
)
var (
// ErrorCodeNewerClientVersion is generated when a request from a client
// specifies a higher version than the server supports.
ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NEWERCLIENTVERSION",
Message: "client is newer than server (client API version: %s, server API version: %s)",
Description: "The client version is higher than the server version",
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeOldClientVersion is generated when a request from a client
// specifies a version lower than the minimum version supported by the server.
ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "OLDCLIENTVERSION",
Message: "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version",
Description: "The client version is too old for the server",
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorNetworkControllerNotEnabled is generated when the networking stack is not enabled
// for certain platforms, like Windows.
ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{
Value: "NETWORK_CONTROLLER_NOT_ENABLED",
Message: "the network controller is not enabled for this platform",
Description: "Docker's networking stack is disabled for this platform",
HTTPStatusCode: http.StatusNotFound,
})
)

175
vendor/github.com/docker/docker/image/fs.go generated vendored Normal file

@ -0,0 +1,175 @@
package image
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
// IDWalkFunc is function called by StoreBackend.Walk
type IDWalkFunc func(id ID) error
// StoreBackend provides interface for image.Store persistence
type StoreBackend interface {
Walk(f IDWalkFunc) error
Get(id ID) ([]byte, error)
Set(data []byte) (ID, error)
Delete(id ID) error
SetMetadata(id ID, key string, data []byte) error
GetMetadata(id ID, key string) ([]byte, error)
DeleteMetadata(id ID, key string) error
}
// fs implements StoreBackend using the filesystem.
type fs struct {
sync.RWMutex
root string
}
const (
contentDirName = "content"
metadataDirName = "metadata"
)
// NewFSStoreBackend returns new filesystem based backend for image.Store
func NewFSStoreBackend(root string) (StoreBackend, error) {
return newFSStore(root)
}
func newFSStore(root string) (*fs, error) {
s := &fs{
root: root,
}
if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
return nil, err
}
return s, nil
}
func (s *fs) contentFile(id ID) string {
dgst := digest.Digest(id)
return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
}
func (s *fs) metadataDir(id ID) string {
dgst := digest.Digest(id)
return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
}
// Walk calls the supplied callback for each image ID in the storage backend.
func (s *fs) Walk(f IDWalkFunc) error {
// Only Canonical digest (sha256) is currently supported
s.RLock()
dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
s.RUnlock()
if err != nil {
return err
}
for _, v := range dir {
dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
continue
}
if err := f(ID(dgst)); err != nil {
return err
}
}
return nil
}
// Get returns the content stored under a given ID.
func (s *fs) Get(id ID) ([]byte, error) {
s.RLock()
defer s.RUnlock()
return s.get(id)
}
func (s *fs) get(id ID) ([]byte, error) {
content, err := ioutil.ReadFile(s.contentFile(id))
if err != nil {
return nil, err
}
// todo: maybe optional
if ID(digest.FromBytes(content)) != id {
return nil, fmt.Errorf("failed to verify image: %v", id)
}
return content, nil
}
// Set stores content under a given ID.
func (s *fs) Set(data []byte) (ID, error) {
s.Lock()
defer s.Unlock()
if len(data) == 0 {
return "", fmt.Errorf("Invalid empty data")
}
id := ID(digest.FromBytes(data))
if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil {
return "", err
}
return id, nil
}
// Delete removes content and metadata files associated with the ID.
func (s *fs) Delete(id ID) error {
s.Lock()
defer s.Unlock()
if err := os.RemoveAll(s.metadataDir(id)); err != nil {
return err
}
if err := os.Remove(s.contentFile(id)); err != nil {
return err
}
return nil
}
// SetMetadata sets metadata for a given ID. It fails if there's no base file.
func (s *fs) SetMetadata(id ID, key string, data []byte) error {
s.Lock()
defer s.Unlock()
if _, err := s.get(id); err != nil {
return err
}
baseDir := filepath.Join(s.metadataDir(id))
if err := os.MkdirAll(baseDir, 0700); err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600)
}
// GetMetadata returns metadata for a given ID.
func (s *fs) GetMetadata(id ID, key string) ([]byte, error) {
s.RLock()
defer s.RUnlock()
if _, err := s.get(id); err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key))
}
// DeleteMetadata removes the metadata associated with an ID.
func (s *fs) DeleteMetadata(id ID, key string) error {
s.Lock()
defer s.Unlock()
return os.RemoveAll(filepath.Join(s.metadataDir(id), key))
}
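A short usage sketch of this backend; the root path and config payload are hypothetical and error handling is abbreviated. Content is filed under its own sha256 digest, and per-image metadata requires the content file to already exist:

package main

import (
	"fmt"

	"github.com/docker/docker/image"
)

func main() {
	backend, err := image.NewFSStoreBackend("/tmp/imagedb-demo") // hypothetical root
	if err != nil {
		panic(err)
	}
	// Set hashes the bytes and stores them under content/sha256/<hex>.
	id, err := backend.Set([]byte(`{"rootfs":{"type":"layers"}}`))
	if err != nil {
		panic(err)
	}
	// SetMetadata fails unless the base content file exists.
	if err := backend.SetMetadata(id, "parent", []byte("sha256:0000")); err != nil {
		panic(err)
	}
	// Walk visits every stored image ID.
	_ = backend.Walk(func(id image.ID) error {
		fmt.Println(id)
		return nil
	})
}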


@ -2,36 +2,23 @@ package image
import (
"encoding/json"
"fmt"
"regexp"
"errors"
"io"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
derr "github.com/docker/docker/errors"
"github.com/docker/docker/pkg/version"
"github.com/docker/docker/runconfig"
"github.com/docker/engine-api/types/container"
)
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// ID is the content-addressable ID of an image.
type ID digest.Digest
// noFallbackMinVersion is the minimum version for which v1compatibility
// information will not be marshaled through the Image struct to remove
// blank fields.
var noFallbackMinVersion = version.Version("1.8.3")
// Descriptor provides the information necessary to register an image in
// the graph.
type Descriptor interface {
ID() string
Parent() string
MarshalConfig() ([]byte, error)
func (id ID) String() string {
return digest.Digest(id).String()
}
// Image stores the image configuration.
// All fields in this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Image struct {
// V1Image stores the V1 image configuration.
type V1Image struct {
// ID a unique 64 character identifier of the image
ID string `json:"id,omitempty"`
// Parent id of the image
@ -42,108 +29,112 @@ type Image struct {
Created time.Time `json:"created"`
// Container is the id of the container used to commit
Container string `json:"container,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image
ContainerConfig runconfig.Config `json:"container_config,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image
ContainerConfig container.Config `json:"container_config,omitempty"`
// DockerVersion specifies version on which image is built
DockerVersion string `json:"docker_version,omitempty"`
// Author of the image
Author string `json:"author,omitempty"`
// Config is the configuration of the container received from the client
Config *runconfig.Config `json:"config,omitempty"`
Config *container.Config `json:"config,omitempty"`
// Architecture is the hardware that the image is build and runs on
Architecture string `json:"architecture,omitempty"`
// OS is the operating system used to build and run the image
OS string `json:"os,omitempty"`
// Size is the total size of the image including all layers it is composed of
Size int64 `json:",omitempty"` // capitalized for backwards compatibility
// ParentID specifies the strong, content address of the parent configuration.
ParentID digest.Digest `json:"parent_id,omitempty"`
// LayerID provides the content address of the associated layer.
LayerID digest.Digest `json:"layer_id,omitempty"`
Size int64 `json:",omitempty"`
}
// NewImgJSON creates an Image configuration from json.
func NewImgJSON(src []byte) (*Image, error) {
ret := &Image{}
// Image stores the image configuration
type Image struct {
V1Image
Parent ID `json:"parent,omitempty"`
RootFS *RootFS `json:"rootfs,omitempty"`
History []History `json:"history,omitempty"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
// FIXME: Is there a cleaner way to "purify" the input json?
if err := json.Unmarshal(src, ret); err != nil {
return nil, err
}
return ret, nil
// rawJSON caches the immutable JSON associated with this image.
rawJSON []byte
// computedID is the ID computed from the hash of the image config.
// Not to be confused with the legacy V1 ID in V1Image.
computedID ID
}
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return derr.ErrorCodeInvalidImageID.WithArgs(id)
}
return nil
// RawJSON returns the immutable JSON associated with the image.
func (img *Image) RawJSON() []byte {
return img.rawJSON
}
// MakeImageConfig returns immutable configuration JSON for image based on the
// v1Compatibility object, layer digest and parent StrongID. SHA256() of this
// config is the new image ID (strongID).
func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) {
// ID returns the image's content-addressable ID.
func (img *Image) ID() ID {
return img.computedID
}
// Detect images created after 1.8.3
img, err := NewImgJSON(v1Compatibility)
// ImageID stringizes ID.
func (img *Image) ImageID() string {
return string(img.ID())
}
// RunConfig returns the image's container config.
func (img *Image) RunConfig() *container.Config {
return img.Config
}
// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
// that JSON that's been manipulated by a push/pull cycle with a legacy
// registry won't end up with a different key order.
func (img *Image) MarshalJSON() ([]byte, error) {
type MarshalImage Image
pass1, err := json.Marshal(MarshalImage(*img))
if err != nil {
return nil, err
}
useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion)
if useFallback {
// Fallback for pre-1.8.3. Calculate base config based on Image struct
// so that fields with default values added by Docker will use same ID
logrus.Debugf("Using fallback hash for %v", layerID)
v1Compatibility, err = json.Marshal(img)
if err != nil {
return nil, err
}
}
var c map[string]*json.RawMessage
if err := json.Unmarshal(v1Compatibility, &c); err != nil {
if err := json.Unmarshal(pass1, &c); err != nil {
return nil, err
}
if err := layerID.Validate(); err != nil {
return nil, fmt.Errorf("invalid layerID: %v", err)
}
c["layer_id"] = rawJSON(layerID)
if parentID != "" {
if err := parentID.Validate(); err != nil {
return nil, fmt.Errorf("invalid parentID %v", err)
}
c["parent_id"] = rawJSON(parentID)
}
delete(c, "id")
delete(c, "parent")
delete(c, "Size") // Size is calculated from data on disk and is inconsitent
return json.Marshal(c)
}
// StrongID returns image ID for the config JSON.
func StrongID(configJSON []byte) (digest.Digest, error) {
digester := digest.Canonical.New()
if _, err := digester.Hash().Write(configJSON); err != nil {
return "", err
}
dgst := digester.Digest()
logrus.Debugf("H(%v) = %v", string(configJSON), dgst)
return dgst, nil
// History stores build commands that were used to create an image
type History struct {
// Created timestamp for build point
Created time.Time `json:"created"`
// Author of the build point
Author string `json:"author,omitempty"`
// CreatedBy keeps the Dockerfile command used while building image.
CreatedBy string `json:"created_by,omitempty"`
// Comment is custom message set by the user when creating the image.
Comment string `json:"comment,omitempty"`
// EmptyLayer is set to true if this history item did not generate a
// layer. Otherwise, the history item is associated with the next
// layer in the RootFS section.
EmptyLayer bool `json:"empty_layer,omitempty"`
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
// Exporter provides interface for exporting and importing images
type Exporter interface {
Load(io.ReadCloser, io.Writer, bool) error
// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
Save([]string, io.Writer) error
}
// NewFromJSON creates an Image configuration from json.
func NewFromJSON(src []byte) (*Image, error) {
img := &Image{}
if err := json.Unmarshal(src, img); err != nil {
return nil, err
}
if img.RootFS == nil {
return nil, errors.New("Invalid image JSON, no RootFS key.")
}
img.rawJSON = src
return img, nil
}
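A small sketch of loading a config through this path; the JSON literal is a hypothetical minimal config (real ones carry history and diff IDs). NewFromJSON rejects configs without a rootfs key and caches the input so RawJSON returns the exact original bytes:

package main

import (
	"fmt"

	"github.com/docker/docker/image"
)

func main() {
	raw := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers"}}`)
	img, err := image.NewFromJSON(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(img.OS, img.Architecture) // fields come from the embedded V1Image
	fmt.Println(string(img.RawJSON()))    // byte-for-byte the input above
}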

16
vendor/github.com/docker/docker/image/rootfs.go generated vendored Normal file

@ -0,0 +1,16 @@
package image
import "github.com/docker/docker/layer"
// TypeLayers is used for RootFS.Type for filesystems organized into layers.
const TypeLayers = "layers"
// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
return &RootFS{Type: TypeLayers}
}
// Append appends a new diffID to rootfs
func (r *RootFS) Append(id layer.DiffID) {
r.DiffIDs = append(r.DiffIDs, id)
}

18
vendor/github.com/docker/docker/image/rootfs_unix.go generated vendored Normal file

@ -0,0 +1,18 @@
// +build !windows
package image
import "github.com/docker/docker/layer"
// RootFS describes an image's root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type RootFS struct {
Type string `json:"type"`
DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
}
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
return layer.CreateChainID(r.DiffIDs)
}
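To my reading, layer.CreateChainID implements the usual chain-ID recurrence: a single layer's chain ID is its diff ID, and each additional layer hashes the previous chain ID, a space, and the next diff ID. A standalone sketch of that recurrence (the diff IDs below are hypothetical placeholders):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

// chainID sketches the recurrence:
//   chain(L0)     = diff(L0)
//   chain(L0..Ln) = Digest(chain(L0..Ln-1) + " " + diff(Ln))
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	chain := diffIDs[0]
	for _, d := range diffIDs[1:] {
		chain = string(digest.FromBytes([]byte(chain + " " + d)))
	}
	return chain
}

func main() {
	fmt.Println(chainID([]string{"sha256:aaaa", "sha256:bbbb"})) // placeholders
}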


@ -0,0 +1,48 @@
// +build windows
package image
import (
"crypto/sha512"
"fmt"
"github.com/docker/distribution/digest"
"github.com/docker/docker/layer"
)
// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer.
const TypeLayersWithBase = "layers+base"
// RootFS describes an image's root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
type RootFS struct {
Type string `json:"type"`
DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
// BaseLayerID returns the 64-character hex ID for the base layer name.
func (r *RootFS) BaseLayerID() string {
if r.Type != TypeLayersWithBase {
panic("tried to get base layer ID without a base layer")
}
baseID := sha512.Sum384([]byte(r.BaseLayer))
return fmt.Sprintf("%x", baseID[:32])
}
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
ids := r.DiffIDs
if r.Type == TypeLayersWithBase {
// Add an extra ID for the base.
baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID())))
ids = append([]layer.DiffID{baseDiffID}, ids...)
}
return layer.CreateChainID(ids)
}
// NewRootFSWithBaseLayer returns a RootFS struct with a base layer
func NewRootFSWithBaseLayer(baseLayer string) *RootFS {
return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer}
}

295
vendor/github.com/docker/docker/image/store.go generated vendored Normal file

@ -0,0 +1,295 @@
package image
import (
"encoding/json"
"errors"
"fmt"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/layer"
)
// Store is an interface for creating and accessing images
type Store interface {
Create(config []byte) (ID, error)
Get(id ID) (*Image, error)
Delete(id ID) ([]layer.Metadata, error)
Search(partialID string) (ID, error)
SetParent(id ID, parent ID) error
GetParent(id ID) (ID, error)
Children(id ID) []ID
Map() map[ID]*Image
Heads() map[ID]*Image
}
// LayerGetReleaser is a minimal interface for getting and releasing images.
type LayerGetReleaser interface {
Get(layer.ChainID) (layer.Layer, error)
Release(layer.Layer) ([]layer.Metadata, error)
}
type imageMeta struct {
layer layer.Layer
children map[ID]struct{}
}
type store struct {
sync.Mutex
ls LayerGetReleaser
images map[ID]*imageMeta
fs StoreBackend
digestSet *digest.Set
}
// NewImageStore returns new store object for given layer store
func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
is := &store{
ls: ls,
images: make(map[ID]*imageMeta),
fs: fs,
digestSet: digest.NewSet(),
}
// load all current images and retain layers
if err := is.restore(); err != nil {
return nil, err
}
return is, nil
}
func (is *store) restore() error {
err := is.fs.Walk(func(id ID) error {
img, err := is.Get(id)
if err != nil {
logrus.Errorf("invalid image %v, %v", id, err)
return nil
}
var l layer.Layer
if chainID := img.RootFS.ChainID(); chainID != "" {
l, err = is.ls.Get(chainID)
if err != nil {
return err
}
}
if err := is.digestSet.Add(digest.Digest(id)); err != nil {
return err
}
imageMeta := &imageMeta{
layer: l,
children: make(map[ID]struct{}),
}
is.images[ID(id)] = imageMeta
return nil
})
if err != nil {
return err
}
// Second pass to fill in children maps
for id := range is.images {
if parent, err := is.GetParent(id); err == nil {
if parentMeta := is.images[parent]; parentMeta != nil {
parentMeta.children[id] = struct{}{}
}
}
}
return nil
}
func (is *store) Create(config []byte) (ID, error) {
var img Image
err := json.Unmarshal(config, &img)
if err != nil {
return "", err
}
// Must reject any config that references diffIDs from the history
// which aren't among the rootfs layers.
rootFSLayers := make(map[layer.DiffID]struct{})
for _, diffID := range img.RootFS.DiffIDs {
rootFSLayers[diffID] = struct{}{}
}
layerCounter := 0
for _, h := range img.History {
if !h.EmptyLayer {
layerCounter++
}
}
if layerCounter > len(img.RootFS.DiffIDs) {
return "", errors.New("too many non-empty layers in History section")
}
dgst, err := is.fs.Set(config)
if err != nil {
return "", err
}
imageID := ID(dgst)
is.Lock()
defer is.Unlock()
if _, exists := is.images[imageID]; exists {
return imageID, nil
}
layerID := img.RootFS.ChainID()
var l layer.Layer
if layerID != "" {
l, err = is.ls.Get(layerID)
if err != nil {
return "", err
}
}
imageMeta := &imageMeta{
layer: l,
children: make(map[ID]struct{}),
}
is.images[imageID] = imageMeta
if err := is.digestSet.Add(digest.Digest(imageID)); err != nil {
delete(is.images, imageID)
return "", err
}
return imageID, nil
}
func (is *store) Search(term string) (ID, error) {
is.Lock()
defer is.Unlock()
dgst, err := is.digestSet.Lookup(term)
if err != nil {
if err == digest.ErrDigestNotFound {
err = fmt.Errorf("No such image: %s", term)
}
return "", err
}
return ID(dgst), nil
}
func (is *store) Get(id ID) (*Image, error) {
// todo: Check if image is in images
// todo: Detect manual insertions and start using them
config, err := is.fs.Get(id)
if err != nil {
return nil, err
}
img, err := NewFromJSON(config)
if err != nil {
return nil, err
}
img.computedID = id
img.Parent, err = is.GetParent(id)
if err != nil {
img.Parent = ""
}
return img, nil
}
func (is *store) Delete(id ID) ([]layer.Metadata, error) {
is.Lock()
defer is.Unlock()
imageMeta := is.images[id]
if imageMeta == nil {
return nil, fmt.Errorf("unrecognized image ID %s", id.String())
}
for id := range imageMeta.children {
is.fs.DeleteMetadata(id, "parent")
}
if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
delete(is.images[parent].children, id)
}
if err := is.digestSet.Remove(digest.Digest(id)); err != nil {
logrus.Errorf("error removing %s from digest set: %q", id, err)
}
delete(is.images, id)
is.fs.Delete(id)
if imageMeta.layer != nil {
return is.ls.Release(imageMeta.layer)
}
return nil, nil
}
func (is *store) SetParent(id, parent ID) error {
is.Lock()
defer is.Unlock()
parentMeta := is.images[parent]
if parentMeta == nil {
return fmt.Errorf("unknown parent image ID %s", parent.String())
}
if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
delete(is.images[parent].children, id)
}
parentMeta.children[id] = struct{}{}
return is.fs.SetMetadata(id, "parent", []byte(parent))
}
func (is *store) GetParent(id ID) (ID, error) {
d, err := is.fs.GetMetadata(id, "parent")
if err != nil {
return "", err
}
return ID(d), nil // todo: validate?
}
func (is *store) Children(id ID) []ID {
is.Lock()
defer is.Unlock()
return is.children(id)
}
func (is *store) children(id ID) []ID {
var ids []ID
if is.images[id] != nil {
for id := range is.images[id].children {
ids = append(ids, id)
}
}
return ids
}
func (is *store) Heads() map[ID]*Image {
return is.imagesMap(false)
}
func (is *store) Map() map[ID]*Image {
return is.imagesMap(true)
}
func (is *store) imagesMap(all bool) map[ID]*Image {
is.Lock()
defer is.Unlock()
images := make(map[ID]*Image)
for id := range is.images {
if !all && len(is.children(id)) > 0 {
continue
}
img, err := is.Get(id)
if err != nil {
logrus.Errorf("invalid image access: %q, error: %q", id, err)
continue
}
images[id] = img
}
return images
}
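Putting the store together, a usage sketch under stated assumptions: noopLS is a stub LayerGetReleaser, and the config deliberately has no diff IDs, so its chain ID is empty and the layer store is never consulted:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

// noopLS is a stub; it is never exercised here because the config
// below has no diff IDs (empty chain ID).
type noopLS struct{}

func (noopLS) Get(layer.ChainID) (layer.Layer, error)        { return nil, nil }
func (noopLS) Release(layer.Layer) ([]layer.Metadata, error) { return nil, nil }

func main() {
	root, err := ioutil.TempDir("", "imagestore-demo")
	if err != nil {
		panic(err)
	}
	backend, err := image.NewFSStoreBackend(root)
	if err != nil {
		panic(err)
	}
	is, err := image.NewImageStore(backend, noopLS{})
	if err != nil {
		panic(err)
	}
	id, err := is.Create([]byte(`{"rootfs":{"type":"layers"}}`))
	if err != nil {
		panic(err)
	}
	img, err := is.Get(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(img.ID()) // content-addressed: the sha256 of the config bytes
}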

156
vendor/github.com/docker/docker/image/v1/imagev1.go generated vendored Normal file

@ -0,0 +1,156 @@
package v1
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/engine-api/types/versions"
)
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// noFallbackMinVersion is the minimum version for which v1compatibility
// information will not be marshaled through the Image struct to remove
// blank fields.
var noFallbackMinVersion = "1.8.3"
// HistoryFromConfig creates a History struct from v1 configuration JSON
func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
h := image.History{}
var v1Image image.V1Image
if err := json.Unmarshal(imageJSON, &v1Image); err != nil {
return h, err
}
return image.History{
Author: v1Image.Author,
Created: v1Image.Created,
CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "),
Comment: v1Image.Comment,
EmptyLayer: emptyLayer,
}, nil
}
// CreateID creates an ID from v1 image, layerID and parent ID.
// Used for backwards compatibility with old clients.
func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
v1Image.ID = ""
v1JSON, err := json.Marshal(v1Image)
if err != nil {
return "", err
}
var config map[string]*json.RawMessage
if err := json.Unmarshal(v1JSON, &config); err != nil {
return "", err
}
// FIXME: note that this is slightly incompatible with RootFS logic
config["layer_id"] = rawJSON(layerID)
if parent != "" {
config["parent"] = rawJSON(parent)
}
configJSON, err := json.Marshal(config)
if err != nil {
return "", err
}
logrus.Debugf("CreateV1ID %s", configJSON)
return digest.FromBytes(configJSON), nil
}
// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) {
var dver struct {
DockerVersion string `json:"docker_version"`
}
if err := json.Unmarshal(imageJSON, &dver); err != nil {
return nil, err
}
useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)
if useFallback {
var v1Image image.V1Image
err := json.Unmarshal(imageJSON, &v1Image)
if err != nil {
return nil, err
}
imageJSON, err = json.Marshal(v1Image)
if err != nil {
return nil, err
}
}
var c map[string]*json.RawMessage
if err := json.Unmarshal(imageJSON, &c); err != nil {
return nil, err
}
delete(c, "id")
delete(c, "parent")
delete(c, "Size") // Size is calculated from data on disk and is inconsistent
delete(c, "parent_id")
delete(c, "layer_id")
delete(c, "throwaway")
c["rootfs"] = rawJSON(rootfs)
c["history"] = rawJSON(history)
return json.Marshal(c)
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct
func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in old manifest
imageType := reflect.TypeOf(img).Elem()
for i := 0; i < imageType.NumField(); i++ {
f := imageType.Field(i)
jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
// Parent is handled specially below.
if jsonName != "" && jsonName != "parent" {
delete(configAsMap, jsonName)
}
}
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
return fmt.Errorf("image ID %q is invalid", id)
}
return nil
}

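For orientation, a minimal sketch of how the helpers above fit together; the v1 config JSON is hypothetical and includes only the fields HistoryFromConfig actually reads:
package main

import (
	"fmt"

	v1 "github.com/docker/docker/image/v1"
)

func main() {
	// Hypothetical v1 config; only author, comment and container_config.Cmd matter here.
	imageJSON := []byte(`{"author":"demo","comment":"initial","container_config":{"Cmd":["/bin/sh","-c","echo hi"]}}`)
	h, err := v1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.CreatedBy) // "/bin/sh -c echo hi"

	// ValidateID accepts only 64 lowercase hex characters.
	fmt.Println(v1.ValidateID("deadbeef")) // image ID "deadbeef" is invalid
}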
48
vendor/github.com/docker/docker/layer/empty.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
package layer
import (
"archive/tar"
"bytes"
"io"
"io/ioutil"
)
// DigestSHA256EmptyTar is the canonical sha256 digest of an empty tar file
// (1024 NULL bytes)
const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef")
type emptyLayer struct{}
// EmptyLayer is a layer that corresponds to empty tar.
var EmptyLayer = &emptyLayer{}
func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
buf := new(bytes.Buffer)
tarWriter := tar.NewWriter(buf)
tarWriter.Close()
return ioutil.NopCloser(buf), nil
}
func (el *emptyLayer) ChainID() ChainID {
return ChainID(DigestSHA256EmptyTar)
}
func (el *emptyLayer) DiffID() DiffID {
return DigestSHA256EmptyTar
}
func (el *emptyLayer) Parent() Layer {
return nil
}
func (el *emptyLayer) Size() (size int64, err error) {
return 0, nil
}
func (el *emptyLayer) DiffSize() (size int64, err error) {
return 0, nil
}
func (el *emptyLayer) Metadata() (map[string]string, error) {
return make(map[string]string), nil
}

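A quick check of the constant above: closing a tar.Writer with no entries emits exactly two 512-byte zero blocks, whose sha256 matches DigestSHA256EmptyTar. A minimal sketch:
package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/layer"
)

func main() {
	rc, err := layer.EmptyLayer.TarStream()
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(rc)
	fmt.Println(len(data)) // 1024: two zero blocks written by tar.Writer.Close
	fmt.Printf("sha256:%x\n", sha256.Sum256(data)) // equals DigestSHA256EmptyTar
}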
354
vendor/github.com/docker/docker/layer/filestore.go generated vendored Normal file
View File

@ -0,0 +1,354 @@
package layer
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
var (
stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
supportedAlgorithms = []digest.Algorithm{
digest.SHA256,
// digest.SHA384, // Currently not used
// digest.SHA512, // Currently not used
}
)
type fileMetadataStore struct {
root string
}
type fileMetadataTransaction struct {
store *fileMetadataStore
root string
}
// NewFSMetadataStore returns an instance of a metadata store
// which is backed by files on disk using the provided root
// as the root of metadata files.
func NewFSMetadataStore(root string) (MetadataStore, error) {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
return &fileMetadataStore{
root: root,
}, nil
}
func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
dgst := digest.Digest(layer)
return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
}
func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
return filepath.Join(fms.getLayerDirectory(layer), filename)
}
func (fms *fileMetadataStore) getMountDirectory(mount string) string {
return filepath.Join(fms.root, "mounts", mount)
}
func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
return filepath.Join(fms.getMountDirectory(mount), filename)
}
func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) {
tmpDir := filepath.Join(fms.root, "tmp")
if err := os.MkdirAll(tmpDir, 0755); err != nil {
return nil, err
}
td, err := ioutil.TempDir(tmpDir, "layer-")
if err != nil {
return nil, err
}
// Create a new tempdir
return &fileMetadataTransaction{
store: fms,
root: td,
}, nil
}
func (fm *fileMetadataTransaction) SetSize(size int64) error {
content := fmt.Sprintf("%d", size)
return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644)
}
func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644)
}
func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644)
}
func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644)
}
func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
jsonRef, err := json.Marshal(ref)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(fm.root, "descriptor.json"), jsonRef, 0644)
}
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
var wc io.WriteCloser
if compressInput {
wc = gzip.NewWriter(f)
} else {
wc = f
}
return ioutils.NewWriteCloserWrapper(wc, func() error {
wc.Close()
return f.Close()
}), nil
}
func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
finalDir := fm.store.getLayerDirectory(layer)
if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
return err
}
return os.Rename(fm.root, finalDir)
}
func (fm *fileMetadataTransaction) Cancel() error {
return os.RemoveAll(fm.root)
}
func (fm *fileMetadataTransaction) String() string {
return fm.root
}
func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size"))
if err != nil {
return 0, err
}
size, err := strconv.ParseInt(string(content), 10, 64)
if err != nil {
return 0, err
}
return size, nil
}
func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return ChainID(dgst), nil
}
func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff"))
if err != nil {
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return DiffID(dgst), nil
}
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid cache id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
if err != nil {
if os.IsNotExist(err) {
// only return empty descriptor to represent what is stored
return distribution.Descriptor{}, nil
}
return distribution.Descriptor{}, err
}
var ref distribution.Descriptor
err = json.Unmarshal(content, &ref)
if err != nil {
return distribution.Descriptor{}, err
}
return ref, err
}
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
if err != nil {
return nil, err
}
f, err := gzip.NewReader(fz)
if err != nil {
return nil, err
}
return ioutils.NewReadCloserWrapper(f, func() error {
f.Close()
return fz.Close()
}), nil
}
func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
}
func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
}
func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
return err
}
return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
}
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid mount id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid init id value")
}
return content, nil
}
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
return ChainID(dgst), nil
}
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
var ids []ChainID
for _, algorithm := range supportedAlgorithms {
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
if err != nil {
if os.IsNotExist(err) {
continue
}
return nil, nil, err
}
for _, fi := range fileInfos {
if fi.IsDir() && fi.Name() != "mounts" {
dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
} else {
ids = append(ids, ChainID(dgst))
}
}
}
}
fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
if err != nil {
if os.IsNotExist(err) {
return ids, []string{}, nil
}
return nil, nil, err
}
var mounts []string
for _, fi := range fileInfos {
if fi.IsDir() {
mounts = append(mounts, fi.Name())
}
}
return ids, mounts, nil
}
func (fms *fileMetadataStore) Remove(layer ChainID) error {
return os.RemoveAll(fms.getLayerDirectory(layer))
}
func (fms *fileMetadataStore) RemoveMount(mount string) error {
return os.RemoveAll(fms.getMountDirectory(mount))
}

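A minimal sketch of the transaction flow above, assuming a scratch root directory; staged metadata lives under <root>/tmp until Commit renames it into <root>/sha256/<hex>:
package main

import (
	"fmt"

	"github.com/docker/docker/layer"
)

func main() {
	ms, err := layer.NewFSMetadataStore("/tmp/layerdb") // scratch root, illustrative only
	if err != nil {
		panic(err)
	}
	tx, err := ms.StartTransaction()
	if err != nil {
		panic(err)
	}
	// Stage metadata files in the transaction's temp dir, then rename into place.
	if err := tx.SetDiffID(layer.DigestSHA256EmptyTar); err != nil {
		panic(err)
	}
	if err := tx.SetSize(0); err != nil {
		panic(err)
	}
	if err := tx.Commit(layer.ChainID(layer.DigestSHA256EmptyTar)); err != nil {
		panic(err)
	}
	fmt.Println("layer metadata committed")
}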
270
vendor/github.com/docker/docker/layer/layer.go generated vendored Normal file
View File

@ -0,0 +1,270 @@
// Package layer is a package for managing read-only
// and read-write mounts on the union file system
// driver. Read-only mounts are referenced using a
// content hash and are protected from mutation in
// the exposed interface. The tar format is used
// to create read-only layers and export both
// read-only and writable layers. The exported
// tar data for a read-only layer should match
// the tar used to create the layer.
package layer
import (
"errors"
"io"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/archive"
)
var (
// ErrLayerDoesNotExist is used when an operation is
// attempted on a layer which does not exist.
ErrLayerDoesNotExist = errors.New("layer does not exist")
// ErrLayerNotRetained is used when a release is
// attempted on a layer which is not retained.
ErrLayerNotRetained = errors.New("layer not retained")
// ErrMountDoesNotExist is used when an operation is
// attempted on a mount layer which does not exist.
ErrMountDoesNotExist = errors.New("mount does not exist")
// ErrMountNameConflict is used when a mount is attempted
// to be created but there is already a mount with the name
// used for creation.
ErrMountNameConflict = errors.New("mount already exists with name")
// ErrActiveMount is used when an operation on a
// mount is attempted but the layer is still
// mounted and the operation cannot be performed.
ErrActiveMount = errors.New("mount still active")
// ErrNotMounted is used when requesting an active
// mount but the layer is not mounted.
ErrNotMounted = errors.New("not mounted")
// ErrMaxDepthExceeded is used when a layer is attempted
// to be created which would result in a layer depth
// greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// on the current platform
ErrNotSupported = errors.New("not supported on this platform")
)
// ChainID is the content-addressable ID of a layer.
type ChainID digest.Digest
// String returns a string rendition of a layer ID
func (id ChainID) String() string {
return string(id)
}
// DiffID is the hash of an individual layer tar.
type DiffID digest.Digest
// String returns a string rendition of a layer DiffID
func (diffID DiffID) String() string {
return string(diffID)
}
// TarStreamer represents an object which may
// have its contents exported as a tar stream.
type TarStreamer interface {
// TarStream returns a tar archive stream
// for the contents of a layer.
TarStream() (io.ReadCloser, error)
}
// Layer represents a read-only layer
type Layer interface {
TarStreamer
// ChainID returns the content hash of the entire layer chain. The hash
// chain is made up of the DiffIDs of the top layer and all of its parents.
ChainID() ChainID
// DiffID returns the content hash of the layer
// tar stream used to create this layer.
DiffID() DiffID
// Parent returns the next layer in the layer chain.
Parent() Layer
// Size returns the size of the entire layer chain. The size
// is calculated from the total size of all files in the layers.
Size() (int64, error)
// DiffSize returns the size difference of the top layer
// from parent layer.
DiffSize() (int64, error)
// Metadata returns the low level storage metadata associated
// with layer.
Metadata() (map[string]string, error)
}
// RWLayer represents a layer which is
// read and writable
type RWLayer interface {
TarStreamer
// Name of mounted layer
Name() string
// Parent returns the layer which the writable
// layer was created from.
Parent() Layer
// Mount mounts the RWLayer and returns the filesystem path
// to the writable layer.
Mount(mountLabel string) (string, error)
// Unmount unmounts the RWLayer. This should be called
// for every mount. If there are multiple mount calls
// this operation will only decrement the internal mount counter.
Unmount() error
// Size represents the size of the writable layer
// as calculated by the total size of the files
// changed in the mutable layer.
Size() (int64, error)
// Changes returns the set of changes for the mutable layer
// from the base layer.
Changes() ([]archive.Change, error)
// Metadata returns the low level metadata for the mutable layer
Metadata() (map[string]string, error)
}
// Metadata holds information about a
// read-only layer
type Metadata struct {
// ChainID is the content hash of the layer
ChainID ChainID
// DiffID is the hash of the tar data used to
// create the layer
DiffID DiffID
// Size is the size of the layer and all parents
Size int64
// DiffSize is the size of the top layer
DiffSize int64
}
// MountInit is a function to initialize a
// writable mount. Changes made here will
// not be included in the Tar stream of the
// RWLayer.
type MountInit func(root string) error
// Store represents a backend for managing both
// read-only and read-write layers.
type Store interface {
Register(io.Reader, ChainID) (Layer, error)
Get(ChainID) (Layer, error)
Release(Layer) ([]Metadata, error)
CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
GetRWLayer(id string) (RWLayer, error)
GetMountID(id string) (string, error)
ReleaseRWLayer(RWLayer) ([]Metadata, error)
Cleanup() error
DriverStatus() [][2]string
DriverName() string
}
// DescribableStore represents a layer store capable of storing
// descriptors for layers.
type DescribableStore interface {
RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
}
// MetadataTransaction represents functions for setting layer metadata
// with a single transaction.
type MetadataTransaction interface {
SetSize(int64) error
SetParent(parent ChainID) error
SetDiffID(DiffID) error
SetCacheID(string) error
SetDescriptor(distribution.Descriptor) error
TarSplitWriter(compressInput bool) (io.WriteCloser, error)
Commit(ChainID) error
Cancel() error
String() string
}
// MetadataStore represents a backend for persisting
// metadata about layers and providing the metadata
// for restoring a Store.
type MetadataStore interface {
// StartTransaction starts an update for new metadata
// which will be used to represent an ID on commit.
StartTransaction() (MetadataTransaction, error)
GetSize(ChainID) (int64, error)
GetParent(ChainID) (ChainID, error)
GetDiffID(ChainID) (DiffID, error)
GetCacheID(ChainID) (string, error)
GetDescriptor(ChainID) (distribution.Descriptor, error)
TarSplitReader(ChainID) (io.ReadCloser, error)
SetMountID(string, string) error
SetInitID(string, string) error
SetMountParent(string, ChainID) error
GetMountID(string) (string, error)
GetInitID(string) (string, error)
GetMountParent(string) (ChainID, error)
// List returns the full list of referenced
// read-only and read-write layers
List() ([]ChainID, []string, error)
Remove(ChainID) error
RemoveMount(string) error
}
// CreateChainID returns the ChainID for a slice of layer DiffIDs.
func CreateChainID(dgsts []DiffID) ChainID {
return createChainIDFromParent("", dgsts...)
}
func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
if len(dgsts) == 0 {
return parent
}
if parent == "" {
return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
}
// H = "H(n-1) SHA256(n)"
dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
}
// ReleaseAndLog releases the provided layer from the given layer
// store, logging any error and release metadata
func ReleaseAndLog(ls Store, l Layer) {
metadata, err := ls.Release(l)
if err != nil {
logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
}
LogReleaseMetadata(metadata)
}
// LogReleaseMetadata logs a metadata array; use it to
// ensure consistent logging of release metadata.
func LogReleaseMetadata(metadatas []Metadata) {
for _, metadata := range metadatas {
logrus.Infof("Layer %s cleaned up", metadata.ChainID)
}
}
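The ChainID recursion above reduces to SHA256("<parent> <diff>") per step. A small sketch with made-up diff IDs, showing the equivalence:
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/docker/docker/layer"
)

func main() {
	// Hypothetical diff IDs.
	d1 := layer.DiffID("sha256:1111111111111111111111111111111111111111111111111111111111111111")
	d2 := layer.DiffID("sha256:2222222222222222222222222222222222222222222222222222222222222222")

	// A single layer's chain ID is its diff ID.
	fmt.Println(layer.CreateChainID([]layer.DiffID{d1}) == layer.ChainID(d1)) // true

	// With a parent, the chain ID is SHA256("<parent> <diff>").
	sum := sha256.Sum256([]byte(string(d1) + " " + string(d2)))
	fmt.Println(layer.CreateChainID([]layer.DiffID{d1, d2}) == layer.ChainID(fmt.Sprintf("sha256:%x", sum))) // true
}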

659
vendor/github.com/docker/docker/layer/layer_store.go generated vendored Normal file
View File

@ -0,0 +1,659 @@
package layer
import (
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// maxLayerDepth represents the maximum number of
// layers which can be chained together. 125 was
// chosen to account for the 127 max in some
// graphdrivers plus the 2 additional layers
// used to create a rwlayer.
const maxLayerDepth = 125
type layerStore struct {
store MetadataStore
driver graphdriver.Driver
layerMap map[ChainID]*roLayer
layerL sync.Mutex
mounts map[string]*mountedLayer
mountL sync.Mutex
}
// StoreOptions are the options used to create a new Store instance
type StoreOptions struct {
StorePath string
MetadataStorePathTemplate string
GraphDriver string
GraphDriverOptions []string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
}
// NewStoreFromOptions creates a new Store instance
func NewStoreFromOptions(options StoreOptions) (Store, error) {
driver, err := graphdriver.New(
options.StorePath,
options.GraphDriver,
options.GraphDriverOptions,
options.UIDMaps,
options.GIDMaps)
if err != nil {
return nil, fmt.Errorf("error initializing graphdriver: %v", err)
}
logrus.Debugf("Using graph driver %s", driver)
fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
if err != nil {
return nil, err
}
return NewStoreFromGraphDriver(fms, driver)
}
// NewStoreFromGraphDriver creates a new Store instance using the provided
// metadata store and graph driver. The metadata store will be used to restore
// the Store.
func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) {
ls := &layerStore{
store: store,
driver: driver,
layerMap: map[ChainID]*roLayer{},
mounts: map[string]*mountedLayer{},
}
ids, mounts, err := store.List()
if err != nil {
return nil, err
}
for _, id := range ids {
l, err := ls.loadLayer(id)
if err != nil {
logrus.Debugf("Failed to load layer %s: %s", id, err)
continue
}
if l.parent != nil {
l.parent.referenceCount++
}
}
for _, mount := range mounts {
if err := ls.loadMount(mount); err != nil {
logrus.Debugf("Failed to load mount %s: %s", mount, err)
}
}
return ls, nil
}
func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
cl, ok := ls.layerMap[layer]
if ok {
return cl, nil
}
diff, err := ls.store.GetDiffID(layer)
if err != nil {
return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
}
size, err := ls.store.GetSize(layer)
if err != nil {
return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
}
cacheID, err := ls.store.GetCacheID(layer)
if err != nil {
return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
}
parent, err := ls.store.GetParent(layer)
if err != nil {
return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
}
descriptor, err := ls.store.GetDescriptor(layer)
if err != nil {
return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
}
cl = &roLayer{
chainID: layer,
diffID: diff,
size: size,
cacheID: cacheID,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if parent != "" {
p, err := ls.loadLayer(parent)
if err != nil {
return nil, err
}
cl.parent = p
}
ls.layerMap[cl.chainID] = cl
return cl, nil
}
func (ls *layerStore) loadMount(mount string) error {
if _, ok := ls.mounts[mount]; ok {
return nil
}
mountID, err := ls.store.GetMountID(mount)
if err != nil {
return err
}
initID, err := ls.store.GetInitID(mount)
if err != nil {
return err
}
parent, err := ls.store.GetMountParent(mount)
if err != nil {
return err
}
ml := &mountedLayer{
name: mount,
mountID: mountID,
initID: initID,
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
if parent != "" {
p, err := ls.loadLayer(parent)
if err != nil {
return err
}
ml.parent = p
p.referenceCount++
}
ls.mounts[ml.name] = ml
return nil
}
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
digester := digest.Canonical.New()
tr := io.TeeReader(ts, digester.Hash())
tsw, err := tx.TarSplitWriter(true)
if err != nil {
return err
}
metaPacker := storage.NewJSONPacker(tsw)
defer tsw.Close()
// we're passing nil here for the file putter, because the ApplyDiff will
// handle the extraction of the archive
rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
if err != nil {
return err
}
applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
if err != nil {
return err
}
// Discard trailing data but ensure metadata is picked up to reconstruct stream
io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed
layer.size = applySize
layer.diffID = DiffID(digester.Digest())
logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)
return nil
}
func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
}
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
// err is used to hold the error which will always trigger
// cleanup of created sources but may not be an error returned
// to the caller (already exists).
var err error
var pid string
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
pid = p.cacheID
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
if p.depth() >= maxLayerDepth {
err = ErrMaxDepthExceeded
return nil, err
}
}
// Create new roLayer
layer := &roLayer{
parent: p,
cacheID: stringid.GenerateRandomID(),
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil {
return nil, err
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
if err := ls.driver.Remove(layer.cacheID); err != nil {
logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
}
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
if err = ls.applyTar(tx, ts, pid, layer); err != nil {
return nil, err
}
if layer.parent == nil {
layer.chainID = ChainID(layer.diffID)
} else {
layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return the error
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer {
l, ok := ls.layerMap[layer]
if !ok {
return nil
}
l.referenceCount++
return l
}
func (ls *layerStore) get(l ChainID) *roLayer {
ls.layerL.Lock()
defer ls.layerL.Unlock()
return ls.getWithoutLock(l)
}
func (ls *layerStore) Get(l ChainID) (Layer, error) {
ls.layerL.Lock()
defer ls.layerL.Unlock()
layer := ls.getWithoutLock(l)
if layer == nil {
return nil, ErrLayerDoesNotExist
}
return layer.getReference(), nil
}
func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
err := ls.driver.Remove(layer.cacheID)
if err != nil {
return err
}
err = ls.store.Remove(layer.chainID)
if err != nil {
return err
}
metadata.DiffID = layer.diffID
metadata.ChainID = layer.chainID
metadata.Size, err = layer.Size()
if err != nil {
return err
}
metadata.DiffSize = layer.size
return nil
}
func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) {
depth := 0
removed := []Metadata{}
for {
if l.referenceCount == 0 {
panic("layer not retained")
}
l.referenceCount--
if l.referenceCount != 0 {
return removed, nil
}
if len(removed) == 0 && depth > 0 {
panic("cannot remove layer with child")
}
if l.hasReferences() {
panic("cannot delete referenced layer")
}
var metadata Metadata
if err := ls.deleteLayer(l, &metadata); err != nil {
return nil, err
}
delete(ls.layerMap, l.chainID)
removed = append(removed, metadata)
if l.parent == nil {
return removed, nil
}
depth++
l = l.parent
}
}
func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
ls.layerL.Lock()
defer ls.layerL.Unlock()
layer, ok := ls.layerMap[l.ChainID()]
if !ok {
return []Metadata{}, nil
}
if !layer.hasReference(l) {
return nil, ErrLayerNotRetained
}
layer.deleteReference(l)
return ls.releaseLayer(layer)
}
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[name]
if ok {
return nil, ErrMountNameConflict
}
var err error
var pid string
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
pid = p.cacheID
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
m = &mountedLayer{
name: name,
parent: p,
mountID: ls.mountID(name),
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
if err != nil {
return nil, err
}
m.initID = pid
}
if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil {
return nil, err
}
if err = ls.saveMount(m); err != nil {
return nil, err
}
return m.getReference(), nil
}
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
mount, ok := ls.mounts[id]
if !ok {
return nil, ErrMountDoesNotExist
}
return mount.getReference(), nil
}
func (ls *layerStore) GetMountID(id string) (string, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
mount, ok := ls.mounts[id]
if !ok {
return "", ErrMountDoesNotExist
}
logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
return mount.mountID, nil
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[l.Name()]
if !ok {
return []Metadata{}, nil
}
if err := m.deleteReference(l); err != nil {
return nil, err
}
if m.hasReferences() {
return []Metadata{}, nil
}
if err := ls.driver.Remove(m.mountID); err != nil {
logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
if m.initID != "" {
if err := ls.driver.Remove(m.initID); err != nil {
logrus.Errorf("Error removing init layer %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
}
if err := ls.store.RemoveMount(m.name); err != nil {
logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err)
m.retakeReference(l)
return nil, err
}
delete(ls.mounts, m.Name())
ls.layerL.Lock()
defer ls.layerL.Unlock()
if m.parent != nil {
return ls.releaseLayer(m.parent)
}
return []Metadata{}, nil
}
func (ls *layerStore) saveMount(mount *mountedLayer) error {
if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
return err
}
if mount.initID != "" {
if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
return err
}
}
if mount.parent != nil {
if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
return err
}
}
ls.mounts[mount.name] = mount
return nil
}
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers
// which expect this layer to have this special name. If all
// graph drivers can be updated to not rely on knowing about this layer
// then the initID should be randomly generated.
initID := fmt.Sprintf("%s-init", graphID)
if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil {
return "", err
}
p, err := ls.driver.Get(initID, "")
if err != nil {
return "", err
}
if err := initFunc(p); err != nil {
ls.driver.Put(initID)
return "", err
}
if err := ls.driver.Put(initID); err != nil {
return "", err
}
return initID, nil
}
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
if !ok {
diffDriver = &naiveDiffPathDriver{ls.driver}
}
defer metadata.Close()
// get our relative path to the container
fileGetCloser, err := diffDriver.DiffGetter(graphID)
if err != nil {
return err
}
defer fileGetCloser.Close()
metaUnpacker := storage.NewJSONUnpacker(metadata)
unpackerCounter := &unpackSizeCounter{metaUnpacker, size}
logrus.Debugf("Assembling tar data for %s", graphID)
return asm.WriteOutputTarStream(fileGetCloser, unpackerCounter, w)
}
func (ls *layerStore) Cleanup() error {
return ls.driver.Cleanup()
}
func (ls *layerStore) DriverStatus() [][2]string {
return ls.driver.Status()
}
func (ls *layerStore) DriverName() string {
return ls.driver.String()
}
type naiveDiffPathDriver struct {
graphdriver.Driver
}
type fileGetPutter struct {
storage.FileGetter
driver graphdriver.Driver
id string
}
func (w *fileGetPutter) Close() error {
return w.driver.Put(w.id)
}
func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
p, err := n.Driver.Get(id, "")
if err != nil {
return nil, err
}
return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
}

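A minimal sketch of bringing up a store with NewStoreFromOptions; the paths and graph driver name here are illustrative, not canonical (Docker itself uses /var/lib/docker/image/<driver>/layerdb as the metadata template):
package main

import (
	"fmt"

	"github.com/docker/docker/layer"
)

func main() {
	ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 "/var/lib/docker",                  // illustrative
		MetadataStorePathTemplate: "/var/lib/docker/image/%s/layerdb", // %s becomes the driver name
		GraphDriver:               "overlay",                          // assumed available on the host
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("layer store using driver:", ls.DriverName())
}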
11
vendor/github.com/docker/docker/layer/layer_store_windows.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package layer
import (
"io"
"github.com/docker/distribution"
)
func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, descriptor)
}

9
vendor/github.com/docker/docker/layer/layer_unix.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
// +build linux freebsd darwin openbsd solaris

package layer
import "github.com/docker/docker/pkg/stringid"
func (ls *layerStore) mountID(name string) string {
return stringid.GenerateRandomID()
}

98
vendor/github.com/docker/docker/layer/layer_windows.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package layer
import (
"errors"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/daemon/graphdriver"
)
// GetLayerPath returns the path to a layer
func GetLayerPath(s Store, layer ChainID) (string, error) {
ls, ok := s.(*layerStore)
if !ok {
return "", errors.New("unsupported layer store")
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
rl, ok := ls.layerMap[layer]
if !ok {
return "", ErrLayerDoesNotExist
}
path, err := ls.driver.Get(rl.cacheID, "")
if err != nil {
return "", err
}
if err := ls.driver.Put(rl.cacheID); err != nil {
return "", err
}
return path, nil
}
func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
var err error // this is used for cleanup in existingLayer case
diffID := digest.FromBytes([]byte(graphID))
// Create new roLayer
layer := &roLayer{
cacheID: graphID,
diffID: DiffID(diffID),
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
size: size,
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
layer.chainID = createChainIDFromParent("", layer.diffID)
if !ls.driver.Exists(layer.cacheID) {
return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID)
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
func (ls *layerStore) mountID(name string) string {
// Windows has issues if the container ID doesn't match the mount ID
return name
}
func (ls *layerStore) GraphDriver() graphdriver.Driver {
return ls.driver
}

256
vendor/github.com/docker/docker/layer/migration.go generated vendored Normal file
View File

@ -0,0 +1,256 @@
package layer
import (
"compress/gzip"
"errors"
"fmt"
"io"
"os"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// CreateRWLayerByGraphID creates an RWLayer in the layer store using
// the provided name and the given graphID. After migration, the
// RWLayer may be retrieved by the given name.
func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[name]
if ok {
if m.parent.chainID != parent {
return errors.New("name conflict, mismatched parent")
}
if m.mountID != graphID {
return errors.New("mount already exists")
}
return nil
}
if !ls.driver.Exists(graphID) {
return fmt.Errorf("graph ID does not exist: %q", graphID)
}
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return ErrLayerDoesNotExist
}
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
// TODO: Ensure graphID has correct parent
m = &mountedLayer{
name: name,
parent: p,
mountID: graphID,
layerStore: ls,
references: map[RWLayer]*referencedRWLayer{},
}
// Check for existing init layer
initID := fmt.Sprintf("%s-init", graphID)
if ls.driver.Exists(initID) {
m.initID = initID
}
if err = ls.saveMount(m); err != nil {
return err
}
return nil
}
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
defer func() {
if err != nil {
logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
}
}()
if oldTarDataPath == "" {
err = errors.New("no tar-split file")
return
}
tarDataFile, err := os.Open(oldTarDataPath)
if err != nil {
return
}
defer tarDataFile.Close()
uncompressed, err := gzip.NewReader(tarDataFile)
if err != nil {
return
}
dgst := digest.Canonical.New()
err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
if err != nil {
return
}
diffID = DiffID(dgst.Digest())
err = os.RemoveAll(newTarDataPath)
if err != nil {
return
}
err = os.Link(oldTarDataPath, newTarDataPath)
return
}
func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
rawarchive, err := ls.driver.Diff(id, parent)
if err != nil {
return
}
defer rawarchive.Close()
f, err := os.Create(newTarDataPath)
if err != nil {
return
}
defer f.Close()
mfz := gzip.NewWriter(f)
defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz)
packerCounter := &packSizeCounter{metaPacker, &size}
archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
if err != nil {
return
}
dgst, err := digest.FromReader(archive)
if err != nil {
return
}
diffID = DiffID(dgst)
return
}
func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
// err is used to hold the error which will always trigger
// cleanup of created sources but may not be an error returned
// to the caller (already exists).
var err error
var p *roLayer
if string(parent) != "" {
p = ls.get(parent)
if p == nil {
return nil, ErrLayerDoesNotExist
}
// Release parent chain if error
defer func() {
if err != nil {
ls.layerL.Lock()
ls.releaseLayer(p)
ls.layerL.Unlock()
}
}()
}
// Create new roLayer
layer := &roLayer{
parent: p,
cacheID: graphID,
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
diffID: diffID,
size: size,
chainID: createChainIDFromParent(parent, diffID),
}
ls.layerL.Lock()
defer ls.layerL.Unlock()
if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
// Set error for cleanup, but do not return
err = errors.New("layer already exists")
return existingLayer.getReference(), nil
}
tx, err := ls.store.StartTransaction()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
if err := tx.Cancel(); err != nil {
logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
}
}
}()
tsw, err := tx.TarSplitWriter(false)
if err != nil {
return nil, err
}
defer tsw.Close()
tdf, err := os.Open(tarDataFile)
if err != nil {
return nil, err
}
defer tdf.Close()
_, err = io.Copy(tsw, tdf)
if err != nil {
return nil, err
}
if err = storeLayer(tx, layer); err != nil {
return nil, err
}
if err = tx.Commit(layer.chainID); err != nil {
return nil, err
}
ls.layerMap[layer.chainID] = layer
return layer.getReference(), nil
}
type unpackSizeCounter struct {
unpacker storage.Unpacker
size *int64
}
func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
e, err := u.unpacker.Next()
if err == nil && u.size != nil {
*u.size += e.Size
}
return e, err
}
type packSizeCounter struct {
packer storage.Packer
size *int64
}
func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
n, err := p.packer.AddEntry(e)
if err == nil && p.size != nil {
*p.size += e.Size
}
return n, err
}

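The pack/unpack counters above simply pass entries through while accumulating their sizes. A standalone sketch of the same idea against the tar-split storage API (the entry here is hypothetical):
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/vbatts/tar-split/tar/storage"
)

// countingPacker mirrors packSizeCounter above: forward to the wrapped
// Packer and accumulate the size of each entry on success.
type countingPacker struct {
	packer storage.Packer
	size   *int64
}

func (p *countingPacker) AddEntry(e storage.Entry) (int, error) {
	n, err := p.packer.AddEntry(e)
	if err == nil && p.size != nil {
		*p.size += e.Size
	}
	return n, err
}

func main() {
	var total int64
	p := &countingPacker{storage.NewJSONPacker(ioutil.Discard), &total}
	if _, err := p.AddEntry(storage.Entry{Type: storage.FileType, Name: "etc/hosts", Size: 42}); err != nil {
		panic(err)
	}
	fmt.Println(total) // 42
}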
103
vendor/github.com/docker/docker/layer/mounted_layer.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
package layer
import (
"io"
"github.com/docker/docker/pkg/archive"
)
type mountedLayer struct {
name string
mountID string
initID string
parent *roLayer
path string
layerStore *layerStore
references map[RWLayer]*referencedRWLayer
}
func (ml *mountedLayer) cacheParent() string {
if ml.initID != "" {
return ml.initID
}
if ml.parent != nil {
return ml.parent.cacheID
}
return ""
}
func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
if err != nil {
return nil, err
}
return archiver, nil
}
func (ml *mountedLayer) Name() string {
return ml.name
}
func (ml *mountedLayer) Parent() Layer {
if ml.parent != nil {
return ml.parent
}
// Return a nil interface instead of an interface wrapping a nil
// pointer.
return nil
}
func (ml *mountedLayer) Size() (int64, error) {
return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
}
func (ml *mountedLayer) Changes() ([]archive.Change, error) {
return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent())
}
func (ml *mountedLayer) Metadata() (map[string]string, error) {
return ml.layerStore.driver.GetMetadata(ml.mountID)
}
func (ml *mountedLayer) getReference() RWLayer {
ref := &referencedRWLayer{
mountedLayer: ml,
}
ml.references[ref] = ref
return ref
}
func (ml *mountedLayer) hasReferences() bool {
return len(ml.references) > 0
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
if _, ok := ml.references[ref]; !ok {
return ErrLayerNotRetained
}
delete(ml.references, ref)
return nil
}
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
ml.references[ref] = ref
}
}
type referencedRWLayer struct {
*mountedLayer
}
func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
}
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
}

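A sketch of the reference-counted RWLayer lifecycle exposed above, written against the Store interface; the package and container names are hypothetical:
package layerdemo

import (
	"fmt"

	"github.com/docker/docker/layer"
)

// useRWLayer creates a writable layer on top of parent, mounts it, and
// releases everything. Unmount must be called once per successful Mount.
func useRWLayer(ls layer.Store, parent layer.ChainID) error {
	rw, err := ls.CreateRWLayer("example-container", parent, "", nil, nil)
	if err != nil {
		return err
	}
	fsPath, err := rw.Mount("")
	if err != nil {
		return err
	}
	fmt.Println("writable filesystem at", fsPath)
	if err := rw.Unmount(); err != nil {
		return err
	}
	_, err = ls.ReleaseRWLayer(rw)
	return err
}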
172
vendor/github.com/docker/docker/layer/ro_layer.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
package layer
import (
"fmt"
"io"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
)
type roLayer struct {
chainID ChainID
diffID DiffID
parent *roLayer
cacheID string
size int64
layerStore *layerStore
descriptor distribution.Descriptor
referenceCount int
references map[Layer]struct{}
}
func (rl *roLayer) TarStream() (io.ReadCloser, error) {
r, err := rl.layerStore.store.TarSplitReader(rl.chainID)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw)
if err != nil {
pw.CloseWithError(err)
} else {
pw.Close()
}
}()
rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
if err != nil {
return nil, err
}
return rc, nil
}
func (rl *roLayer) ChainID() ChainID {
return rl.chainID
}
func (rl *roLayer) DiffID() DiffID {
return rl.diffID
}
func (rl *roLayer) Parent() Layer {
if rl.parent == nil {
return nil
}
return rl.parent
}
func (rl *roLayer) Size() (size int64, err error) {
if rl.parent != nil {
size, err = rl.parent.Size()
if err != nil {
return
}
}
return size + rl.size, nil
}
func (rl *roLayer) DiffSize() (size int64, err error) {
return rl.size, nil
}
func (rl *roLayer) Metadata() (map[string]string, error) {
return rl.layerStore.driver.GetMetadata(rl.cacheID)
}
type referencedCacheLayer struct {
*roLayer
}
func (rl *roLayer) getReference() Layer {
ref := &referencedCacheLayer{
roLayer: rl,
}
rl.references[ref] = struct{}{}
return ref
}
func (rl *roLayer) hasReference(ref Layer) bool {
_, ok := rl.references[ref]
return ok
}
func (rl *roLayer) hasReferences() bool {
return len(rl.references) > 0
}
func (rl *roLayer) deleteReference(ref Layer) {
delete(rl.references, ref)
}
func (rl *roLayer) depth() int {
if rl.parent == nil {
return 1
}
return rl.parent.depth() + 1
}
func storeLayer(tx MetadataTransaction, layer *roLayer) error {
if err := tx.SetDiffID(layer.diffID); err != nil {
return err
}
if err := tx.SetSize(layer.size); err != nil {
return err
}
if err := tx.SetCacheID(layer.cacheID); err != nil {
return err
}
// Do not store empty descriptors
if layer.descriptor.Digest != "" {
if err := tx.SetDescriptor(layer.descriptor); err != nil {
return err
}
}
if layer.parent != nil {
if err := tx.SetParent(layer.parent.chainID); err != nil {
return err
}
}
return nil
}
func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
verifier, err := digest.NewDigestVerifier(dgst)
if err != nil {
return nil, err
}
return &verifiedReadCloser{
rc: rc,
dgst: dgst,
verifier: verifier,
}, nil
}
type verifiedReadCloser struct {
rc io.ReadCloser
dgst digest.Digest
verifier digest.Verifier
}
func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
n, err = vrc.rc.Read(p)
if n > 0 {
if n, err := vrc.verifier.Write(p[:n]); err != nil {
return n, err
}
}
if err == io.EOF {
if !vrc.verifier.Verified() {
err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
}
}
return
}
func (vrc *verifiedReadCloser) Close() error {
return vrc.rc.Close()
}

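The verifiedReadCloser above is an instance of a generic pattern: tee every read through a digest.Verifier and check Verified() at EOF. A self-contained sketch of that pattern:
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("layer data")
	dgst := digest.FromBytes(payload)
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}
	// Every byte read is also written into the verifier.
	r := io.TeeReader(bytes.NewReader(payload), verifier)
	if _, err := ioutil.ReadAll(r); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true iff the data matches dgst
}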
9
vendor/github.com/docker/docker/layer/ro_layer_windows.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
package layer
import "github.com/docker/distribution"
var _ distribution.Describable = &roLayer{}
func (rl *roLayer) Descriptor() distribution.Descriptor {
return rl.descriptor
}

151
vendor/github.com/docker/docker/opts/hosts.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package opts
import (
"fmt"
"net"
"net/url"
"strconv"
"strings"
)
var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
// DefaultUnixSocket Path for the unix socket.
// Docker daemon by default always listens on the default unix socket
DefaultUnixSocket = "/var/run/docker.sock"
// DefaultTCPHost constant defines the default host string used by docker on Windows
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
// DefaultNamedPipe defines the default named pipe used by docker on Windows
DefaultNamedPipe = `//./pipe/docker_engine`
)
// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
host := strings.TrimSpace(val)
// The empty string means default and is not handled by parseDockerDaemonHost
if host != "" {
_, err := parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
return val, nil
}
// ParseHost and set defaults for a Daemon host string
func ParseHost(defaultToTLS bool, val string) (string, error) {
host := strings.TrimSpace(val)
if host == "" {
if defaultToTLS {
host = DefaultTLSHost
} else {
host = DefaultHost
}
} else {
var err error
host, err = parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
return host, nil
}
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
func parseDockerDaemonHost(addr string) (string, error) {
addrParts := strings.SplitN(addr, "://", 2)
if len(addrParts) == 1 && addrParts[0] != "" {
addrParts = []string{"tcp", addrParts[0]}
}
switch addrParts[0] {
case "tcp":
return ParseTCPAddr(addrParts[1], DefaultTCPHost)
case "unix":
return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
case "npipe":
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
case "fd":
return addr, nil
default:
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
}
// parseSimpleProtoAddr parses and validates that the specified address is a valid
// socket address for simple protocols like unix and npipe. It returns a formatted
// socket address, either using the address parsed from addr, or the contents of
// defaultAddr if addr is a blank string.
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, proto+"://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("%s://%s", proto, addr), nil
}
// ParseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d
// defaultAddr must be in the full `tcp://host:port` form
func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
if tryAddr == "" || tryAddr == "tcp://" {
return defaultAddr, nil
}
addr := strings.TrimPrefix(tryAddr, "tcp://")
if strings.Contains(addr, "://") || addr == "" {
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
}
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
if err != nil {
return "", err
}
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
// not 1.4. See https://github.com/golang/go/issues/12200 and
// https://github.com/golang/go/issues/6530.
if strings.HasSuffix(addr, "]:") {
addr += defaultPort
}
u, err := url.Parse("tcp://" + addr)
if err != nil {
return "", err
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
// try port addition once
host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
}
if err != nil {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
if host == "" {
host = defaultHost
}
if port == "" {
port = defaultPort
}
p, err := strconv.Atoi(port)
if err != nil && p == 0 {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}

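A few illustrative calls showing how the parsing above normalizes addresses; the printed results assume the defaults defined in this file:
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Bare host:port strings gain a tcp:// scheme; missing parts fall back to the default.
	fmt.Println(opts.ParseTCPAddr("127.0.0.1", "tcp://0.0.0.0:2375")) // tcp://127.0.0.1:2375 <nil>
	fmt.Println(opts.ParseTCPAddr("", "tcp://0.0.0.0:2375"))          // tcp://0.0.0.0:2375 <nil>
	fmt.Println(opts.ParseHost(false, "unix://"))                     // unix:///var/run/docker.sock <nil>
}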
vendor/github.com/docker/docker/opts/hosts_windows.go generated vendored
View File

@ -3,4 +3,4 @@
package opts
// DefaultHost constant defines the default host string used by docker on Windows
var DefaultHost = DefaultTCPHost
var DefaultHost = "npipe://" + DefaultNamedPipe

vendor/github.com/docker/docker/opts/ip.go generated vendored
View File

@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
}
// Set sets an IPv4 or IPv6 address from a given string. If the given
// string is not parsable as an IP address it returns an error.
// string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error {
ip := net.ParseIP(val)
if ip == nil {

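A small usage sketch for IPOpt, assuming NewIPOpt applies the default through Set inside the constructor shown above:
package main

import (
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	o := opts.NewIPOpt(&ip, "127.0.0.1") // default is parsed into ip immediately
	fmt.Println(ip)                      // 127.0.0.1
	if err := o.Set("::1"); err != nil {
		panic(err)
	}
	fmt.Println(ip) // ::1
}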
vendor/github.com/docker/docker/opts/opts.go generated vendored
View File

@ -3,33 +3,15 @@ package opts
import (
"fmt"
"net"
"os"
"path"
"regexp"
"strings"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/engine-api/types/filters"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
// is not supplied. A better longer term solution would be to use a named
// pipe as the default on the Windows daemon.
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
// DefaultUnixSocket Path for the unix socket.
// Docker daemon by default always listens on the default unix socket
DefaultUnixSocket = "/var/run/docker.sock"
// DefaultTCPHost constant defines the default host string used by docker on Windows
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
)
// ListOpts holds a list of values and a validation function.
@ -56,7 +38,7 @@ func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates if needed the input value and add it to the
// Set validates if needed the input value and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
@ -95,6 +77,16 @@ func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
@ -110,6 +102,40 @@ func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
// MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
@ -144,6 +170,11 @@ func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
@ -155,116 +186,35 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
// ValidateAttach validates that the specified string is a valid attach option.
func ValidateAttach(val string) (string, error) {
s := strings.ToLower(val)
for _, str := range []string{"stdin", "stdout", "stderr"} {
if s == str {
return s, nil
}
}
return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
if _, _, err := parsers.ParseLink(val); err != nil {
return val, err
}
return val, nil
}
// ValidDeviceMode checks if the mode for a device is valid or not.
// A valid mode is a composition of r (read), w (write), and m (mknod).
func ValidDeviceMode(mode string) bool {
var legalDeviceMode = map[rune]bool{
'r': true,
'w': true,
'm': true,
}
if mode == "" {
return false
}
for _, c := range mode {
if !legalDeviceMode[c] {
return false
}
legalDeviceMode[c] = false
}
return true
}
// ValidateDevice validates a path for devices
// It will make sure 'val' is in the form:
// [host-dir:]container-path[:mode]
// It also validates the device mode.
func ValidateDevice(val string) (string, error) {
return validatePath(val, ValidDeviceMode)
}
func validatePath(val string, validator func(string) bool) (string, error) {
var containerPath string
var mode string
if strings.Count(val, ":") > 2 {
return val, fmt.Errorf("bad format for path: %s", val)
}
split := strings.SplitN(val, ":", 3)
if split[0] == "" {
return val, fmt.Errorf("bad format for path: %s", val)
}
switch len(split) {
case 1:
containerPath = split[0]
val = path.Clean(containerPath)
case 2:
if isValid := validator(split[1]); isValid {
containerPath = split[0]
mode = split[1]
val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
} else {
containerPath = split[1]
val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath))
}
case 3:
containerPath = split[1]
mode = split[2]
if isValid := validator(split[2]); !isValid {
return val, fmt.Errorf("bad mode specified: %s", mode)
}
val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode)
}
if !path.IsAbs(containerPath) {
return val, fmt.Errorf("%s is not an absolute path", containerPath)
}
return val, nil
}
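// Illustrative example (not part of this commit) of how validatePath
// normalizes the [host-dir:]container-path[:mode] form; a sketch assuming
// "fmt" is imported and the device paths are hypothetical.
func ExampleValidateDevice() {
	v, _ := ValidateDevice("/dev/sda:/dev/xvda:rwm")
	_, err := ValidateDevice("/dev/sda:/dev/xvda:abc")
	fmt.Println(v)
	fmt.Println(err)
	// Output:
	// /dev/sda:/dev/xvda:rwm
	// bad mode specified: abc
}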
// ValidateEnv validates an environment variable and returns it.
// If no value is specified, it returns the current value using os.Getenv.
//
// As with ParseEnvFile and related to #16585, environment variable names
// are not validated whatsoever; it's up to the application inside docker
// to validate them or not.
func ValidateEnv(val string) (string, error) {
arr := strings.Split(val, "=")
if len(arr) > 1 {
return val, nil
}
if !doesEnvExist(val) {
return val, nil
}
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
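// Illustrative example (not part of this commit): a bare variable name that
// exists in the caller's environment is completed from it, while key=value
// pairs pass through untouched. A sketch assuming "fmt" and "os" are
// imported; the variable names are hypothetical.
func ExampleValidateEnv() {
	os.Setenv("DEMO_HOME", "/home/demo")
	v1, _ := ValidateEnv("DEMO_HOME")
	v2, _ := ValidateEnv("FOO=bar")
	fmt.Println(v1)
	fmt.Println(v2)
	// Output:
	// DEMO_HOME=/home/demo
	// FOO=bar
}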
// ValidateIPAddress validates an IP address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
@ -274,15 +224,6 @@ func ValidateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
}
return val, nil
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
@ -303,20 +244,6 @@ func validateDomain(val string) (string, error) {
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
// ExtraHosts are in the form of name:ip where the ip has to be a valid IP address (IPv4 or IPv6).
func ValidateExtraHost(val string) (string, error) {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
if len(arr) != 2 || len(arr[0]) == 0 {
return "", fmt.Errorf("bad format for add-host: %q", val)
}
if _, err := ValidateIPAddress(arr[1]); err != nil {
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
return val, nil
}
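// Illustrative calls (not part of this commit; the hosts are hypothetical):
// splitting on the first ":" only is what lets IPv6 addresses through.
//
//	ValidateExtraHost("db:10.0.0.5") // ok
//	ValidateExtraHost("db:::1")      // ok: name "db", IPv6 address "::1"
//	ValidateExtraHost("10.0.0.5")    // error: bad format for add-host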
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form of key=value.
func ValidateLabel(val string) (string, error) {
@ -326,32 +253,69 @@ func ValidateLabel(val string) (string, error) {
return val, nil
}
// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
_, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
if err != nil {
return val, err
// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
return val, nil
}
// ParseHost and set defaults for a Daemon host string
func ParseHost(defaultHost, val string) (string, error) {
host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
if err != nil {
return val, err
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
arr := strings.Split(val, "=")
if len(arr) < 2 {
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
if validSysctlMap[arr[0]] {
return val, nil
}
return host, nil
}
func doesEnvExist(name string) bool {
for _, entry := range os.Environ() {
parts := strings.SplitN(entry, "=", 2)
if parts[0] == name {
return true
for _, vp := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], vp) {
return val, nil
}
}
return false
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
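// Illustrative calls (not part of this commit; the values are hypothetical):
//
//	ValidateSysctl("net.ipv4.ip_forward=1") // ok: "net." prefix is whitelisted
//	ValidateSysctl("kernel.shmmax=65536")   // ok: exact whitelist entry
//	ValidateSysctl("vm.swappiness=10")      // error: not whitelisted
//	ValidateSysctl("kernel.shmmax")         // error: missing "=value" part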
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
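// Illustrative sketch (not part of this commit; the filter expressions are
// hypothetical): repeated --filter flags accumulate into a single
// filters.Args value.
//
//	fo := NewFilterOpt()
//	_ = fo.Set("dangling=true")
//	_ = fo.Set("label=env=dev")
//	args := fo.Value() // carries both filters for a single API call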
View File
@ -1,10 +1,10 @@
package opts
// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP4.
// a docker client against a local docker daemon on TP5.
//
// What was found that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
@ -22,7 +22,7 @@ package opts
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=toDiskLocking....
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
@ -35,7 +35,7 @@ package opts
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory,
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
@ -44,12 +44,12 @@ package opts
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows TP4 build, regardless of whether the daemon is built using 1.5.1
// Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...'
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
View File
@ -31,8 +31,10 @@ type (
Archive io.ReadCloser
// Reader is a type of io.Reader.
Reader io.Reader
// Compression is the state represtents if compressed or not.
// Compression is the state representing whether the archive is compressed or not.
Compression int
// WhiteoutFormat is the format of whiteouts unpacked
WhiteoutFormat int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
@ -47,6 +49,10 @@ type (
GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
IncludeSourceDir bool
// WhiteoutFormat is the expected on-disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@ -77,6 +83,11 @@ var (
defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)
const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)
const (
// Uncompressed represents the uncompressed state.
Uncompressed Compression = iota
@ -88,7 +99,16 @@ const (
Xz
)
// IsArchive checks if it is a archive by the header.
const (
// AUFSWhiteoutFormat is the default format for whiteouts
AUFSWhiteoutFormat WhiteoutFormat = iota
// OverlayWhiteoutFormat formats whiteout according to the overlay
// standard.
OverlayWhiteoutFormat
)
// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
compression := DetectCompression(header)
if compression != Uncompressed {
@ -99,6 +119,23 @@ func IsArchive(header []byte) bool {
return err == nil
}
// IsArchivePath checks if the (possibly compressed) file at the given path
// starts with a tar file header.
func IsArchivePath(path string) bool {
file, err := os.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}
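// Illustrative usage (not part of this commit; the path is hypothetical):
//
//	if IsArchivePath("/var/lib/docker/tmp/layer.tar") {
//		// safe to hand the file to DecompressStream/Untar
//	}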
// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
@ -107,7 +144,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
logrus.Debugf("Len too short")
logrus.Debug("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
@ -123,12 +160,18 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
if err != nil {
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
// that results in an io.EOF from the Peek() call. So, in those
// cases we'll just treat it as a non-compressed stream and
// that means just create an empty layer.
// See Issue 18170
return nil, err
}
@ -163,8 +206,8 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
}
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
@ -199,6 +242,11 @@ func (compression *Compression) Extension() string {
return ""
}
type tarWhiteoutConverter interface {
ConvertWrite(*tar.Header, string, os.FileInfo) error
ConvertRead(*tar.Header, string) (bool, error)
}
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
@ -207,6 +255,12 @@ type tarAppender struct {
SeenFiles map[uint64]string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
// For packing and unpacking whiteout files in the
// non-standard format. The whiteout files defined
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
}
// canonicalTarName provides a platform-independent and consistent posix-style
@ -224,6 +278,7 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
@ -275,8 +330,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
// handle re-mapping container ID mappings back to host ID mappings before
// writing tar headers/files
if ta.UIDMaps != nil || ta.GIDMaps != nil {
// writing tar headers/files. We skip whiteout files because they were written
// by the kernel and already have proper ownership relative to the host
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
uid, gid, err := getFileUIDGID(fi.Sys())
if err != nil {
return err
@ -293,11 +349,17 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gid = xGID
}
if ta.WhiteoutConverter != nil {
if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
return err
}
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg {
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := os.Open(path)
if err != nil {
return err
@ -378,7 +440,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
case tar.TypeXGlobalHeader:
logrus.Debugf("PAX Global Extended Headers found and ignored")
logrus.Debug("PAX Global Extended Headers found and ignored")
return nil
default:
@ -395,10 +457,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
var errors []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if err == syscall.ENOTSUP {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
// bail.
errors = append(errors, err.Error())
continue
}
return err
}
}
if len(errors) > 0 {
logrus.WithFields(logrus.Fields{
"errors": errors,
}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
}
// There is no LChmod, so ignore mode for symlink. Also, this
@ -407,19 +485,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return err
}
aTime := hdr.AccessTime
if aTime.Before(hdr.ModTime) {
// Last access time should never be before last modified time.
aTime = hdr.ModTime
}
// system.Chtimes doesn't support a NOFOLLOW flag atm
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
@ -456,23 +540,24 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
}
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
logrus.Errorf("Can't close pipe writer: %s", err)
}
}()
@ -515,7 +600,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
@ -540,16 +625,42 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
if err != nil {
logrus.Debugf("Error matching %s: %v", relFilePath, err)
logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
}
if skip {
if !exceptions && f.IsDir() {
// If we want to skip this file and it's a directory
// then we should first check to see if there's an
// excludes pattern (e.g. !dir/file) that starts with this
// dir. If so then we can't skip this dir.
// It's not a dir, so we can just return/skip.
if !f.IsDir() {
return nil
}
// No exceptions (!...) in patterns so just skip dir
if !exceptions {
return filepath.SkipDir
}
return nil
dirSlash := relFilePath + string(filepath.Separator)
for _, pat := range patterns {
if pat[0] != '!' {
continue
}
pat = pat[1:] + string(filepath.Separator)
if strings.HasPrefix(pat, dirSlash) {
// found a match - so can't skip this dir
return nil
}
}
// No matching exclusion dir so just skip dir
return filepath.SkipDir
}
if seen[relFilePath] {
@ -571,7 +682,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
return err
}
}
return nil
})
@ -592,6 +707,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
if err != nil {
return err
}
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
// Iterate through the files in the archive.
loop:
@ -624,7 +740,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0777)
err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
if err != nil {
return err
}
@ -691,6 +807,16 @@ loop:
hdr.Gid = xGID
}
if whiteoutConverter != nil {
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
if err != nil {
return err
}
if !writeFile {
continue
}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
@ -794,10 +920,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
GIDMaps: archiver.GIDMaps,
}
}
if err := archiver.Untar(archive, dst, options); err != nil {
return err
}
return nil
return archiver.Untar(archive, dst, options)
}
// UntarPath is a convenience function which looks for an archive
@ -818,9 +941,17 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
if !srcSt.IsDir() {
return archiver.CopyFileWithTar(src, dst)
}
// if this archiver is set up with ID mapping we need to create
// the new destination directory with the remapped root UID/GID pair
// as owner
rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
if err != nil {
return err
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
if err := system.MkdirAll(dst, 0755); err != nil {
if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
View File
@ -0,0 +1,90 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/system"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
hdr.Name = WhiteoutPrefix + hdr.Name
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
*hdr = tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return nil
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
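// Illustrative summary (not part of this commit) of the mapping the two
// methods implement, assuming WhiteoutOpaqueDir is ".wh..wh..opq" and using
// hypothetical paths:
//
//	pack (ConvertWrite):  0:0 char device "dir/file"          -> empty file "dir/.wh.file"
//	                      dir with trusted.overlay.opaque="y" -> extra entry "dir/.wh..wh..opq"
//	unpack (ConvertRead): "dir/.wh..wh..opq" -> setxattr(dir, "trusted.overlay.opaque", "y")
//	                      "dir/.wh.file"     -> mknod("dir/file", S_IFCHR, 0)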
View File
@ -0,0 +1,7 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}
View File
@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a seperate function as this is platform specific. On Linux, we
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
View File
@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string {
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a seperate function as this is platform specific.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
View File
@ -81,6 +81,33 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@ -105,21 +132,24 @@ func Changes(layers []string, rw string) ([]Change, error) {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, WhiteoutPrefix) {
originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
@ -150,7 +180,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directoriy in the rw and ro layers are all equal.
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
View File
@ -283,3 +283,30 @@ func clen(n []byte) int {
}
return len(n)
}
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}
View File
@ -135,30 +135,17 @@ type CopyInfo struct {
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string) (CopyInfo, error) {
// Split the given path into its Directory and Base components. We will
// evaluate symlinks in the directory component then append the base.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symlink;
// we will use the target file instead of the symlink if
// followLink is set
path = normalizePath(path)
dirPath, basePath := filepath.Split(path)
resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath
var rebaseName string
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
@ -279,7 +266,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
@ -287,7 +277,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
@ -301,14 +294,17 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// rebaseArchiveEntries rewrites the given srcContent archive replacing
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
@ -355,7 +351,7 @@ func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string) error {
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
@ -369,7 +365,7 @@ func CopyResource(srcPath, dstPath string) error {
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil {
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
@ -405,3 +401,58 @@ func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
return Untar(copyArchive, dstDir, options)
}
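// Illustrative usage (not part of this commit; the paths are hypothetical):
//
//	// copy a host directory to another host path through a tar stream,
//	// resolving a symlinked source only when followLink is true
//	if err := CopyResource("/srv/app/config", "/srv/backup/config", false); err != nil {
//		log.Fatal(err)
//	}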
// ResolveHostSourcePath decides which real path needs to be copied, given
// parameters such as whether to follow a symlink or not. If followLink is
// true, resolvedPath will be the link target of any symlink file; otherwise
// it only resolves symlinks in the directory portion of the path and returns
// a symlink file itself without resolving it.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not following the symlink, resolve the symlink of the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath,
// returning the completed resolved path and the rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separators or dot) so
// we can manually join it with them
var rebaseName string
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
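// Illustrative call (not part of this commit; the paths are hypothetical,
// with "current" a symlink that resolved to "v2"):
//
//	resolved, rebase := GetRebaseName("/data/current/", "/data/v2")
//	// resolved == "/data/v2/", rebase == "current": entries in the copied
//	// archive are renamed back to the basename the user asked for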
View File
@ -9,7 +9,7 @@ package archive
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for remoing an actaul file. Normally these files are excluded from exported
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
View File
@ -0,0 +1,97 @@
package chrootarchive
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
)
var chrootArchiver = &archive.Archiver{Untar: Untar}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, false)
}
// Handler for teasing out the automatic decompression
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
if options == nil {
options = &archive.TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return err
}
dest = filepath.Clean(dest)
if _, err := os.Stat(dest); os.IsNotExist(err) {
if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil {
return err
}
}
r := ioutil.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
return err
}
defer decompressedArchive.Close()
r = decompressedArchive
}
return invokeUnpack(r, dest, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
// If either Tar or Untar fails, TarUntar aborts and returns the error.
func TarUntar(src, dst string) error {
return chrootArchiver.TarUntar(src, dst)
}
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
func CopyWithTar(src, dst string) error {
return chrootArchiver.CopyWithTar(src, dst)
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
// destination path will be `dst/base(src)` or `dst\base(src)`
func CopyFileWithTar(src, dst string) (err error) {
return chrootArchiver.CopyFileWithTar(src, dst)
}
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
func UntarPath(src, dst string) error {
return chrootArchiver.UntarPath(src, dst)
}
View File
@ -0,0 +1,86 @@
// +build !windows
package chrootarchive
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and reexec.
func untar() {
runtime.LockOSThread()
flag.Parse()
var options *archive.TarOptions
// read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
if err := archive.Unpack(os.Stdin, "/", options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
// when the full image list is passed (e.g. when this is used by
// `docker load`). We will marshal the options via a pipe to the
// child
r, w, err := os.Pipe()
if err != nil {
return fmt.Errorf("Untar pipe failure: %v", err)
}
cmd := reexec.Command("docker-untar", dest)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
// write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
}
w.Close()
if err := cmd.Wait(); err != nil {
// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
}
return nil
}
View File
@ -0,0 +1,22 @@
package chrootarchive
import (
"io"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// chroot is not supported by Windows
func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
options *archive.TarOptions) error {
// Windows is different from Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
View File
@ -0,0 +1,103 @@
package chrootarchive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
// chroot on Linux uses pivot_root instead of chroot.
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs.
func chroot(path string) (err error) {
if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}
if err := mount.MakeRPrivate(path); err != nil {
return err
}
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
if err != nil {
return fmt.Errorf("Error setting up pivot dir: %v", err)
}
var mounted bool
defer func() {
if mounted {
// make sure pivotDir is not mounted before we try to remove it
if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
if err == nil {
err = errCleanup
}
return
}
}
errCleanup := os.Remove(pivotDir)
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
if err == nil {
err = errCleanup
}
}
if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil {
if err == nil {
err = fmt.Errorf("error unmounting root: %v", errCleanup)
}
return
}
}()
if err := syscall.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
}
return realChroot(path)
}
mounted = true
// This is the new path for where the old root (prior to the pivot) has been moved to
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root: %v", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("Error making old root private after pivot: %v", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
}
mounted = false
return nil
}
func realChroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return fmt.Errorf("Error after fallback to chroot: %v", err)
}
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root after chroot: %v", err)
}
return nil
}
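// Illustrative usage (not part of this commit; the path is hypothetical).
// chroot must run on a locked OS thread in a re-exec'd helper process, as in
// untar()/applyLayer(), because the mount namespace and root change are
// irreversible for the calling process:
//
//	runtime.LockOSThread()
//	if err := chroot("/var/lib/docker/tmp/extract-123"); err != nil {
//		fatal(err)
//	}
//	// from here on, "/" is the extraction root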
View File
@ -0,0 +1,12 @@
// +build !windows,!linux
package chrootarchive
import "syscall"
func chroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return err
}
return syscall.Chdir("/")
}
View File
@ -0,0 +1,19 @@
package chrootarchive
import "github.com/docker/docker/pkg/archive"
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can only be
// uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
View File
@ -0,0 +1,120 @@
//+build !windows
package chrootarchive
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/system"
)
type applyLayerResponse struct {
LayerSize int64 `json:"layerSize"`
}
// applyLayer is the entry-point for docker-applyLayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and reexec.
func applyLayer() {
var (
tmpDir = ""
err error
options *archive.TarOptions
)
runtime.LockOSThread()
flag.Parse()
if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
// We need to be able to set any perms
oldmask, err := system.Umask(0)
defer system.Umask(oldmask)
if err != nil {
fatal(err)
}
if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
fatal(err)
}
if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
size, err := archive.UnpackLayer("/", os.Stdin, options)
os.RemoveAll(tmpDir)
if err != nil {
fatal(err)
}
encoder := json.NewEncoder(os.Stdout)
if err := encoder.Encode(applyLayerResponse{size}); err != nil {
fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
}
if _, err := flush(os.Stdin); err != nil {
fatal(err)
}
os.Exit(0)
}
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
if options == nil {
options = &archive.TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {
return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
}
cmd := reexec.Command("docker-applyLayer", dest)
cmd.Stdin = layer
cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
cmd.Stdout, cmd.Stderr = outBuf, errBuf
if err = cmd.Run(); err != nil {
return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
}
// Stdout should be a valid JSON struct representing an applyLayerResponse.
response := applyLayerResponse{}
decoder := json.NewDecoder(outBuf)
if err = decoder.Decode(&response); err != nil {
return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
}
return response.LayerSize, nil
}
View File
@ -0,0 +1,44 @@
package chrootarchive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/longpath"
)
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
dest = filepath.Clean(dest)
// Ensure it is a Windows-style volume path
dest = longpath.AddPrefix(dest)
if decompress {
decompressed, err := archive.DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompressed.Close()
layer = decompressed
}
tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
}
s, err := archive.UnpackLayer(dest, layer, nil)
os.RemoveAll(tmpDir)
if err != nil {
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
}
return s, nil
}
View File
@ -0,0 +1,28 @@
// +build !windows
package chrootarchive
import (
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/docker/pkg/reexec"
)
func init() {
reexec.Register("docker-applyLayer", applyLayer)
reexec.Register("docker-untar", untar)
}
func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
// flush consumes all the bytes from the reader, discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
return io.Copy(ioutil.Discard, r)
}
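// Illustrative sketch (not part of this commit): a binary embedding this
// package must give the registered entry points a chance to run before its
// normal startup, typically first thing in main():
//
//	func main() {
//		if reexec.Init() { // ran docker-untar or docker-applyLayer if re-exec'd
//			return
//		}
//		// normal startup continues here
//	}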
View File
@ -0,0 +1,4 @@
package chrootarchive
func init() {
}
View File
@ -6,17 +6,19 @@ import (
"io"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/Sirupsen/logrus"
)
// exclusion return true if the specified pattern is an exclusion
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty return true if the specified pattern is empty
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
@ -50,7 +52,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}
return cleanedPatterns, patternDirs, exceptions, nil
@ -76,13 +78,14 @@ func Matches(file string, patterns []string) (bool, error) {
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doen't need to do as much error checking and clean-up. This was done to avoid
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, "/")
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for i, pattern := range patterns {
negative := false
@ -92,16 +95,16 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
pattern = pattern[1:]
}
match, err := filepath.Match(pattern, file)
match, err := regexpMatch(pattern, file)
if err != nil {
return false, err
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}
@ -117,8 +120,104 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
return matched, nil
}
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in golang's filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(pattern, path); err != nil {
return false, err
}
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
if sl == `\` {
escSL += `\`
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
}
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if strings.Index(".$", string(ch)) != -1 {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape next char. Note that a trailing \ in the pattern
// will be left alone (but need to escape it)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
} else {
regStr += `\`
}
} else {
regStr += string(ch)
}
}
regStr += "$"
res, err := regexp.MatchString(regStr, path)
// Map regexp's error to filepath's so no one knows we're not using filepath
if err != nil {
err = filepath.ErrBadPattern
}
return res, err
}
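// Illustrative behaviour on Linux (not part of this commit; the patterns are
// hypothetical): the "**" extension is what plain filepath.Match cannot
// express.
//
//	regexpMatch("docs/**/*.md", "docs/a/b/README.md")    // true: "**" spans directories
//	filepath.Match("docs/**/*.md", "docs/a/b/README.md") // false: "*" stops at "/"
//	regexpMatch("*.go", "main.go")                       // true: plain patterns behave as before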
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and remove
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
View File
@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
return -1
}
View File
@ -1,4 +1,4 @@
package utils
package gitutils
import (
"fmt"
@ -14,9 +14,9 @@ import (
"github.com/docker/docker/pkg/urlutil"
)
// GitClone clones a repository into a newly created directory which
// Clone clones a repository into a newly created directory which
// will be under "docker-build-git"
func GitClone(remoteURL string) (string, error) {
func Clone(remoteURL string) (string, error) {
if !urlutil.IsGitTransport(remoteURL) {
remoteURL = "https://" + remoteURL
}
View File
@ -155,6 +155,9 @@ func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
@ -171,15 +174,14 @@ func parseSubidFile(path, username string) (ranges, error) {
}
text := strings.TrimSpace(s.Text())
if text == "" {
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
-if parts[0] == username {
-// return the first entry for a user; ignores potential for multiple ranges per user
+if parts[0] == username || username == "ALL" {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)


@@ -4,46 +4,34 @@ import (
	"fmt"
	"os/exec"
	"path/filepath"
+	"regexp"
+	"sort"
+	"strconv"
	"strings"
-	"syscall"
+	"sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
-// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
-// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
-// addgroup --gid <id> <groupname>
-// groupadd -g <id> <groupname>
-
-const baseUID int = 10000
-const baseGID int = 10000
-const idMAX int = 65534
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
+// useradd -r -s /bin/false <username>

var (
-	userCommand  string
-	groupCommand string
+	once        sync.Once
+	userCommand string

	cmdTemplates = map[string]string{
-		"adduser":  "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
-		"useradd":  "-M -u %d -s /bin/false -N -g %s %s",
-		"addgroup": "--gid %d %s",
-		"groupadd": "-g %d %s",
+		"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+		"useradd": "-r -s /bin/false %s",
+		"usermod": "-%s %d-%d %s",
	}
-)
-
-func init() {
-	// set up which commands are used for adding users/groups dependent on distro
-	if _, err := resolveBinary("adduser"); err == nil {
-		userCommand = "adduser"
-	} else if _, err := resolveBinary("useradd"); err == nil {
-		userCommand = "useradd"
-	}
-	if _, err := resolveBinary("addgroup"); err == nil {
-		groupCommand = "addgroup"
-	} else if _, err := resolveBinary("groupadd"); err == nil {
-		groupCommand = "groupadd"
-	}
-}
+
+	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+	// default length for a UID/GID subordinate range
+	defaultRangeLen   = 65536
+	defaultRangeStart = 100000
+	userMod           = "usermod"
+)

func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
@@ -62,94 +50,139 @@ func resolveBinary(binname string) (string, error) {
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

-// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
-// and calls the appropriate helper function to add the group and then
-// the user to the group in /etc/group and /etc/passwd respectively.
-// This new user's /etc/sub{uid,gid} ranges will be used for user namespace
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
-	// Find unused uid, gid pair
-	uid, err := findUnusedUID(baseUID)
-	if err != nil {
-		return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err)
-	}
-	gid, err := findUnusedGID(baseGID)
-	if err != nil {
-		return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err)
+	if err := addUser(name); err != nil {
+		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
	}

-	// First add the group that we will use
-	if err := addGroup(name, gid); err != nil {
-		return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err)
+	// Query the system for the created uid and gid pair
+	out, err := execCmd("id", name)
+	if err != nil {
+		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
	}

-	// Add the user as a member of the group
-	if err := addUser(name, uid, name); err != nil {
-		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+	if len(matches) != 3 {
+		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
	}
+	uid, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+	}
+	gid, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+	}
+
+	// Now we need to create the subuid/subgid ranges for our new user/group (system users
+	// do not get auto-created ranges in subuid/subgid)
+	if err := createSubordinateRanges(name); err != nil {
+		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+	}
	return uid, gid, nil
}

-func addUser(userName string, uid int, groupName string) error {
+func addUser(userName string) error {
+	once.Do(func() {
+		// set up which commands are used for adding users/groups dependent on distro
+		if _, err := resolveBinary("adduser"); err == nil {
+			userCommand = "adduser"
+		} else if _, err := resolveBinary("useradd"); err == nil {
+			userCommand = "useradd"
+		}
+	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
-	args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName)
-	return execAddCmd(userCommand, args)
-}
-
-func addGroup(groupName string, gid int) error {
-	if groupCommand == "" {
-		return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found")
-	}
-	args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName)
-	// only error out if the error isn't that the group already exists
-	// if the group exists then our needs are already met
-	if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") {
-		return err
-	}
-	return nil
-}
-
-func execAddCmd(cmd, args string) error {
-	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
-	out, err := execCmd.CombinedOutput()
+	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+	out, err := execCmd(userCommand, args)
	if err != nil {
-		return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
+		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
	}
	return nil
}

-func findUnusedUID(startUID int) (int, error) {
-	return findUnused("passwd", startUID)
-}
-
-func findUnusedGID(startGID int) (int, error) {
-	return findUnused("group", startGID)
-}
-
-func findUnused(file string, id int) (int, error) {
-	for {
-		cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
-		cmd := exec.Command("sh", "-c", cmdStr)
-		if err := cmd.Run(); err != nil {
-			// if a non-zero return code occurs, then we know the ID was not found
-			// and is usable
-			if exiterr, ok := err.(*exec.ExitError); ok {
-				// The program has exited with an exit code != 0
-				if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
-					if status.ExitStatus() == 1 {
-						//no match, we can use this ID
-						return id, nil
-					}
-				}
-			}
-			return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
-		}
-		id++
-		if id > idMAX {
-			return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
+func createSubordinateRanges(name string) error {
+	// first, we should verify that ranges weren't automatically created
+	// by the distro tooling
+	ranges, err := parseSubuid(name)
+	if err != nil {
+		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+	}
+	if len(ranges) == 0 {
+		// no UID ranges; let's create one
+		startID, err := findNextUIDRange()
+		if err != nil {
+			return fmt.Errorf("Can't find available subuid range: %v", err)
+		}
+		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+		if err != nil {
+			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}
+
+	ranges, err = parseSubgid(name)
+	if err != nil {
+		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+	}
+	if len(ranges) == 0 {
+		// no GID ranges; let's create one
+		startID, err := findNextGIDRange()
+		if err != nil {
+			return fmt.Errorf("Can't find available subgid range: %v", err)
+		}
+		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+		if err != nil {
+			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+		}
+	}
+	return nil
}
+
+func findNextUIDRange() (int, error) {
+	ranges, err := parseSubuid("ALL")
+	if err != nil {
+		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+	}
+	sort.Sort(ranges)
+	return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+	ranges, err := parseSubgid("ALL")
+	if err != nil {
+		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+	}
+	sort.Sort(ranges)
+	return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+	startID := defaultRangeStart
+	for _, arange := range rangeList {
+		if wouldOverlap(arange, startID) {
+			startID = arange.Start + arange.Length
+		}
+	}
+	return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+	low := ID
+	high := ID + defaultRangeLen
+	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+		(high <= arange.Start+arange.Length && high >= arange.Start) {
+		return true
+	}
+	return false
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+	return execCmd.CombinedOutput()
+}

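Two moving parts above are easy to sanity-check in isolation: the idOutRegexp capture of uid/gid from `id` output, and the next-free-range arithmetic in findNextRangeStart/wouldOverlap. A standalone sketch re-declaring just those pieces; the `id` output line and the existing ranges are made-up samples, and findNextRangeStart is simplified to return only an int:

package main

import (
	"fmt"
	"regexp"
	"sort"
)

type subIDRange struct {
	Start  int
	Length int
}

type ranges []subIDRange

func (r ranges) Len() int           { return len(r) }
func (r ranges) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r ranges) Less(i, j int) bool { return r[i].Start < r[j].Start }

const defaultRangeLen = 65536
const defaultRangeStart = 100000

func wouldOverlap(arange subIDRange, id int) bool {
	low, high := id, id+defaultRangeLen
	return (low >= arange.Start && low <= arange.Start+arange.Length) ||
		(high <= arange.Start+arange.Length && high >= arange.Start)
}

func findNextRangeStart(rangeList ranges) int {
	startID := defaultRangeStart
	for _, arange := range rangeList {
		if wouldOverlap(arange, startID) {
			startID = arange.Start + arange.Length
		}
	}
	return startID
}

func main() {
	// 1) Pull uid/gid out of `id` output, as AddNamespaceRangesUser does.
	idOutRegexp := regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	out := "uid=995(dockremap) gid=992(dockremap) groups=992(dockremap)"
	m := idOutRegexp.FindStringSubmatch(out)
	fmt.Println("uid:", m[1], "gid:", m[2]) // uid: 995 gid: 992

	// 2) Allocate the next free 65536-wide range after the existing ones.
	existing := ranges{{100000, 65536}, {165536, 65536}}
	sort.Sort(existing)
	fmt.Println("next range starts at:", findNextRangeStart(existing)) // 231072
}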
vendor/github.com/docker/docker/pkg/ioutils/buffer.go (generated, vendored, new file)

@@ -0,0 +1,51 @@
package ioutils

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n
	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}

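fixedBuffer is unexported, so the sketch below re-declares its Write and Read verbatim to show the contract: Write fills whatever capacity remains and reports errBufferFull once the buffer is exactly full, while Read drains from lastRead up to pos. The buffer size and payloads are made-up examples:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n
	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func main() {
	b := &fixedBuffer{buf: make([]byte, 0, 8)}

	// Fits entirely: n == len(p), err == nil.
	n, err := b.Write([]byte("hello"))
	fmt.Println(n, err) // 5 <nil>

	// Only 3 of 5 bytes fit; the buffer is now exactly full.
	n, err = b.Write([]byte("world"))
	fmt.Println(n, err == errBufferFull) // 3 true

	out := make([]byte, 16)
	n, _ = b.Read(out)
	fmt.Println(string(out[:n])) // hellowor
}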

@@ -1,42 +1,80 @@
package ioutils

+import (
+	"errors"
+	"io"
+	"sync"
+)
+
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

-// BytesPipe is io.ReadWriter which works similarly to pipe(queue).
-// All written data could be read only once. Also BytesPipe is allocating
-// and releasing new byte slices to adjust to current needs, so there won't be
-// overgrown buffer after high load peak.
-// BytesPipe isn't goroutine-safe, caller must synchronize it if needed.
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
type BytesPipe struct {
-	buf      [][]byte // slice of byte-slices of buffered data
-	lastRead int      // index in the first slice to a read point
-	bufLen   int      // length of data buffered over the slices
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
-func NewBytesPipe(buf []byte) *BytesPipe {
-	if cap(buf) == 0 {
-		buf = make([]byte, 0, 64)
-	}
-	return &BytesPipe{
-		buf: [][]byte{buf[:0]},
-	}
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
+	bp.wait = sync.NewCond(&bp.mu)
+	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
-func (bp *BytesPipe) Write(p []byte) (n int, err error) {
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+	bp.mu.Lock()
+	written := 0
+loop0:
	for {
-		// write data to the last buffer
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return written, ErrClosed
+		}
+		if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(64))
+		}
+		// get the last buffer
		b := bp.buf[len(bp.buf)-1]
-		// copy data to the current empty allocated area
-		n := copy(b[len(b):cap(b)], p)
-		// increment buffered data length
+
+		n, err := b.Write(p)
+		written += n
		bp.bufLen += n
-		// include written data in last buffer
-		bp.buf[len(bp.buf)-1] = b[:len(b)+n]
+
+		// errBufferFull is an error we expect to get if the buffer is full
+		if err != nil && err != errBufferFull {
+			bp.wait.Broadcast()
+			bp.mu.Unlock()
+			return written, err
+		}

		// if there was enough room to write all then break
		if len(p) == n {
@@ -45,45 +83,104 @@ func (bp *BytesPipe) Write(p []byte) (n int, err error) {
		// more data: write to the next slice
		p = p[n:]

-		// allocate slice that has twice the size of the last unless maximum reached
-		nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
-		if maxCap < nextCap {
+		// make sure the buffer doesn't grow too big from this write
+		for bp.bufLen >= blockThreshold {
+			bp.wait.Wait()
+			if bp.closeErr != nil {
+				continue loop0
+			}
+		}
+
+		// add new byte slice to the buffers slice and continue writing
+		nextCap := b.Cap() * 2
+		if nextCap > maxCap {
			nextCap = maxCap
		}
-		// add new byte slice to the buffers slice and continue writing
-		bp.buf = append(bp.buf, make([]byte, 0, nextCap))
+		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
-	return
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return written, nil
}

-func (bp *BytesPipe) len() int {
-	return bp.bufLen - bp.lastRead
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+	bp.mu.Lock()
+	if err != nil {
+		bp.closeErr = err
+	} else {
+		bp.closeErr = io.EOF
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
-	for {
-		read := copy(p, bp.buf[0][bp.lastRead:])
-		n += read
-		bp.lastRead += read
-		if bp.len() == 0 {
-			// we have read everything. reset to the beginning.
-			bp.lastRead = 0
-			bp.bufLen -= len(bp.buf[0])
-			bp.buf[0] = bp.buf[0][:0]
-			break
+	bp.mu.Lock()
+	if bp.bufLen == 0 {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return 0, bp.closeErr
		}
-		// break if everything was read
+		bp.wait.Wait()
+		if bp.bufLen == 0 && bp.closeErr != nil {
+			err := bp.closeErr
+			bp.mu.Unlock()
+			return 0, err
+		}
+	}
+
+	for bp.bufLen > 0 {
+		b := bp.buf[0]
+		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+		n += read
+		bp.bufLen -= read
+
+		if b.Len() == 0 {
+			// it's empty so return it to the pool and move to the next one
+			returnBuffer(b)
+			bp.buf[0] = nil
+			bp.buf = bp.buf[1:]
+		}
+
		if len(p) == read {
			break
		}
+
		// more buffered data and more asked. read from next slice.
		p = p[read:]
-		bp.lastRead = 0
-		bp.bufLen -= len(bp.buf[0])
-		bp.buf[0] = nil     // throw away old slice
-		bp.buf = bp.buf[1:] // switch to next
	}
+
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
	return
}
+
+func returnBuffer(b *fixedBuffer) {
+	b.Reset()
+	bufPoolsLock.Lock()
+	pool := bufPools[b.Cap()]
+	bufPoolsLock.Unlock()
+	if pool != nil {
+		pool.Put(b)
+	}
+}
+
+func getBuffer(size int) *fixedBuffer {
+	bufPoolsLock.Lock()
+	pool, ok := bufPools[size]
+	if !ok {
+		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+		bufPools[size] = pool
+	}
+	bufPoolsLock.Unlock()
+	return pool.Get().(*fixedBuffer)
+}

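A hedged usage sketch of the new goroutine-safe API, assuming the vendored import path: one goroutine writes and closes, another drains until the pipe reports io.EOF. The payloads are made-up examples:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe() // note: no initial slice argument anymore

	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close() // readers drain remaining data, then get io.EOF
	}()

	buf := make([]byte, 32)
	for {
		n, err := bp.Read(buf)
		if n > 0 {
			fmt.Print(string(buf[:n]))
		}
		if err != nil {
			break
		}
	}
	fmt.Println()
}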

@@ -0,0 +1,82 @@
package ioutils

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}
	abspath, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	return &atomicFileWriter{
		f:    f,
		fn:   abspath,
		perm: perm,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
		f.(*atomicFileWriter).writeErr = err
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type atomicFileWriter struct {
	f        *os.File
	fn       string
	writeErr error
	perm     os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil || w.writeErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	return nil
}

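The point of the temp-file-plus-rename dance above is that readers of the destination path never observe a partially written file: either the rename lands the complete new contents, or the old file is left untouched. A hedged usage sketch, assuming the vendored import path; the path and payload are made-up examples:

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	err := ioutils.AtomicWriteFile("/tmp/config.json", []byte(`{"debug":true}`), 0600)
	if err != nil {
		log.Fatal(err)
	}
}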
Some files were not shown because too many files have changed in this diff.