Update vendor directory and make necessary code changes
Fixes for new geth version
parent 20ce0ab852
commit 36533f7c3f
Gopkg.toml (new file, 84 lines)
@@ -0,0 +1,84 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"

[[override]]
  name = "gopkg.in/fsnotify.v1"
  source = "gopkg.in/fsnotify/fsnotify.v1"

[[override]]
  name = "github.com/pressly/sup"
  version = "0.5.3"

[[constraint]]
  name = "github.com/onsi/ginkgo"
  version = "1.4.0"

[[constraint]]
  branch = "master"
  name = "github.com/jmoiron/sqlx"

[[constraint]]
  branch = "master"
  name = "github.com/lib/pq"

[[constraint]]
  name = "github.com/sirupsen/logrus"
  version = "1.2.0"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "0.0.1"

[[constraint]]
  name = "github.com/ethereum/go-ethereum"
  source = "github.com/vulcanize/go-ethereum"
  branch = "rpc_statediffing"

[[constraint]]
  name = "github.com/vulcanize/eth-block-extractor"
  branch = "pair_with_syncAndPublish"

[[constraint]]
  name = "github.com/ipfs/go-ipfs"
  source = "github.com/vulcanize/go-ipfs"
  branch = "postgres_update"

[[override]]
  name = "github.com/dgraph-io/badger"
  revision = "0ce1d2e26af1ba8b8a72ea864145a3e1e3b382cd"

[prune]
  go-tests = true
  unused-packages = true

[[prune.project]]
  name = "github.com/ethereum/go-ethereum"
  unused-packages = false

[[prune.project]]
  name = "github.com/karalabe/hid"
  unused-packages = false

[[prune.project]]
  name = "github.com/karalabe/usb"
  unused-packages = false
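The source fields above are what let the build track forks without touching import paths: dep fetches github.com/ethereum/go-ethereum from the vulcanize fork's rpc_statediffing branch, yet Go files keep importing the canonical path. A minimal, hypothetical sketch (not part of this commit) of what that looks like on the consuming side:

package main

import (
	"fmt"

	// The import path stays canonical; the [[constraint]] above only
	// redirects where dep fetches the code from (the vulcanize fork).
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// HexToHash left-pads its argument out to a 32-byte hash.
	fmt.Println(common.HexToHash("0xdeadbeef").Hex())
}

After editing the manifest, running dep ensure re-resolves the dependency graph and repopulates vendor/, which is where the bulk of this commit's diff comes from.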
@@ -150,6 +150,10 @@ func (blockChain *BlockChain) getPOAHeader(blockNumber int64) (header core.Heade
 	if POAHeader.Number == nil {
 		return header, ErrEmptyHeader
 	}
+	time := POAHeader.Time.ToInt()
+	if time == nil {
+		time = big.NewInt(0)
+	}
 	return blockChain.headerConverter.Convert(&types.Header{
 		ParentHash: POAHeader.ParentHash,
 		UncleHash:  POAHeader.UncleHash,
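The added lines guard against POA (clique) headers that arrive without a timestamp: ToInt() on an empty value yields a nil *big.Int, so the code substitutes zero rather than handing nil to the converter. A standalone sketch of the same pattern, assuming the header's Time field is a *hexutil.Big as the ToInt() call suggests; the struct below is illustrative, not the project's actual type:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// poaHeader is a stand-in carrying only the field relevant here.
type poaHeader struct {
	Time *hexutil.Big
}

// headerTime mirrors the hunk above: a missing timestamp becomes zero
// instead of a nil *big.Int propagating into later conversions.
func headerTime(h poaHeader) *big.Int {
	t := h.Time.ToInt() // calling ToInt on a nil *hexutil.Big just returns nil
	if t == nil {
		t = big.NewInt(0)
	}
	return t
}

func main() {
	fmt.Println(headerTime(poaHeader{}))                                     // 0
	fmt.Println(headerTime(poaHeader{Time: (*hexutil.Big)(big.NewInt(42))})) // 42
}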
@@ -31,6 +31,7 @@ func (converter HeaderConverter) Convert(gethHeader *types.Header, blockHash str
 	if err != nil {
 		panic(err)
 	}
+	strInt := strconv.FormatUint(gethHeader.Time, 10)
 	coreHeader := core.Header{
 		Hash:        blockHash,
 		BlockNumber: gethHeader.Number.Int64(),
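This line tracks a geth API change: in the vendored version, types.Header.Time is a plain uint64 holding Unix seconds rather than the *big.Int older releases used, so the converter formats it with strconv instead of calling String() on a big integer. A small sketch of the conversion, with a made-up timestamp for illustration:

package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// With the geth version this commit vendors, Header.Time is a uint64
	// of seconds since the Unix epoch.
	header := &types.Header{Time: 1559248200}

	asString := strconv.FormatUint(header.Time, 10) // what the converter stores
	asTime := time.Unix(int64(header.Time), 0).UTC()

	fmt.Println(asString) // "1559248200"
	fmt.Println(asTime)   // 2019-05-30 20:30:00 +0000 UTC
}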
vendor/bazil.org/fuse/.gitattributes (generated, vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
*.go filter=gofmt
*.cgo filter=gofmt
vendor/bazil.org/fuse/.gitignore (generated, vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
*~
.#*
## the next line needs to start with a backslash to avoid looking like
## a comment
\#*#
.*.swp

*.test

/clockfs
/hellofs
vendor/bazil.org/fuse/LICENSE (generated, vendored, new file, 93 lines)
@@ -0,0 +1,93 @@
|
|||||||
|
Copyright (c) 2013-2015 Tommi Virtanen.
|
||||||
|
Copyright (c) 2009, 2011, 2012 The Go Authors.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
The following included software components have additional copyright
|
||||||
|
notices and license terms that may differ from the above.
|
||||||
|
|
||||||
|
|
||||||
|
File fuse.go:
|
||||||
|
|
||||||
|
// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c,
|
||||||
|
// which carries this notice:
|
||||||
|
//
|
||||||
|
// The files in this directory are subject to the following license.
|
||||||
|
//
|
||||||
|
// The author of this software is Russ Cox.
|
||||||
|
//
|
||||||
|
// Copyright (c) 2006 Russ Cox
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose without fee is hereby granted, provided that this entire notice
|
||||||
|
// is included in all copies of any software which is or includes a copy
|
||||||
|
// or modification of this software and in all copies of the supporting
|
||||||
|
// documentation for such software.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
|
||||||
|
// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY
|
||||||
|
// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS
|
||||||
|
// FITNESS FOR ANY PARTICULAR PURPOSE.
|
||||||
|
|
||||||
|
|
||||||
|
File fuse_kernel.go:
|
||||||
|
|
||||||
|
// Derived from FUSE's fuse_kernel.h
|
||||||
|
/*
|
||||||
|
This file defines the kernel interface of FUSE
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||||
|
|
||||||
|
|
||||||
|
This -- and only this -- header file may also be distributed under
|
||||||
|
the terms of the BSD Licence as follows:
|
||||||
|
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||||
|
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||||
|
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||||
|
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGE.
|
||||||
|
*/
|
vendor/bazil.org/fuse/README.md (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
bazil.org/fuse -- Filesystems in Go
===================================

`bazil.org/fuse` is a Go library for writing FUSE userspace
filesystems.

It is a from-scratch implementation of the kernel-userspace
communication protocol, and does not use the C library from the
project called FUSE. `bazil.org/fuse` embraces Go fully for safety and
ease of programming.

Here’s how to get going:

    go get bazil.org/fuse

Website: http://bazil.org/fuse/

Github repository: https://github.com/bazil/fuse

API docs: http://godoc.org/bazil.org/fuse

Our thanks to Russ Cox for his fuse library, which this project is
based on.
vendor/bazil.org/fuse/buffer.go (generated, vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
package fuse

import "unsafe"

// buffer provides a mechanism for constructing a message from
// multiple segments.
type buffer []byte

// alloc allocates size bytes and returns a pointer to the new
// segment.
func (w *buffer) alloc(size uintptr) unsafe.Pointer {
	s := int(size)
	if len(*w)+s > cap(*w) {
		old := *w
		*w = make([]byte, len(*w), 2*cap(*w)+s)
		copy(*w, old)
	}
	l := len(*w)
	*w = (*w)[:l+s]
	return unsafe.Pointer(&(*w)[l])
}

// reset clears out the contents of the buffer.
func (w *buffer) reset() {
	for i := range (*w)[:cap(*w)] {
		(*w)[i] = 0
	}
	*w = (*w)[:0]
}

func newBuffer(extra uintptr) buffer {
	const hdrSize = unsafe.Sizeof(outHeader{})
	buf := make(buffer, hdrSize, hdrSize+extra)
	return buf
}
vendor/bazil.org/fuse/debug.go (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
package fuse

import (
	"runtime"
)

func stack() string {
	buf := make([]byte, 1024)
	return string(buf[:runtime.Stack(buf, false)])
}

func nop(msg interface{}) {}

// Debug is called to output debug messages, including protocol
// traces. The default behavior is to do nothing.
//
// The messages have human-friendly string representations and are
// safe to marshal to JSON.
//
// Implementations must not retain msg.
var Debug func(msg interface{}) = nop
vendor/bazil.org/fuse/error_darwin.go (generated, vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
package fuse

import (
	"syscall"
)

const (
	ENOATTR = Errno(syscall.ENOATTR)
)

const (
	errNoXattr = ENOATTR
)

func init() {
	errnoNames[errNoXattr] = "ENOATTR"
}
vendor/bazil.org/fuse/error_freebsd.go (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
package fuse

import "syscall"

const (
	ENOATTR = Errno(syscall.ENOATTR)
)

const (
	errNoXattr = ENOATTR
)

func init() {
	errnoNames[errNoXattr] = "ENOATTR"
}
vendor/bazil.org/fuse/error_linux.go (generated, vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
package fuse

import (
	"syscall"
)

const (
	ENODATA = Errno(syscall.ENODATA)
)

const (
	errNoXattr = ENODATA
)

func init() {
	errnoNames[errNoXattr] = "ENODATA"
}
vendor/bazil.org/fuse/error_std.go (generated, vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
package fuse

// There is very little commonality in extended attribute errors
// across platforms.
//
// getxattr return value for "extended attribute does not exist" is
// ENOATTR on OS X, and ENODATA on Linux and apparently at least
// NetBSD. There may be a #define ENOATTR on Linux too, but the value
// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no
// ENODATA, only ENOATTR. ENOATTR is not in any of the standards,
// ENODATA exists but is only used for STREAMs.
//
// Each platform will define it a errNoXattr constant, and this file
// will enforce that it implements the right interfaces and hide the
// implementation.
//
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html
// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html
// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2
// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html

// ErrNoXattr is a platform-independent error value meaning the
// extended attribute was not found. It can be used to respond to
// GetxattrRequest and such.
const ErrNoXattr = errNoXattr

var _ error = ErrNoXattr
var _ Errno = ErrNoXattr
var _ ErrorNumber = ErrNoXattr
vendor/bazil.org/fuse/fs/serve.go (generated, vendored, new file, 1568 lines)
File diff suppressed because it is too large
vendor/bazil.org/fuse/fs/tree.go (generated, vendored, new file, 99 lines)
@@ -0,0 +1,99 @@
// FUSE directory tree, for servers that wish to use it with the service loop.

package fs

import (
	"os"
	pathpkg "path"
	"strings"

	"golang.org/x/net/context"
)

import (
	"bazil.org/fuse"
)

// A Tree implements a basic read-only directory tree for FUSE.
// The Nodes contained in it may still be writable.
type Tree struct {
	tree
}

func (t *Tree) Root() (Node, error) {
	return &t.tree, nil
}

// Add adds the path to the tree, resolving to the given node.
// If path or a prefix of path has already been added to the tree,
// Add panics.
//
// Add is only safe to call before starting to serve requests.
func (t *Tree) Add(path string, node Node) {
	path = pathpkg.Clean("/" + path)[1:]
	elems := strings.Split(path, "/")
	dir := Node(&t.tree)
	for i, elem := range elems {
		dt, ok := dir.(*tree)
		if !ok {
			panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path)
		}
		n := dt.lookup(elem)
		if n != nil {
			if i+1 == len(elems) {
				panic("fuse: Tree.Add for " + path + " conflicts with " + elem)
			}
			dir = n
		} else {
			if i+1 == len(elems) {
				dt.add(elem, node)
			} else {
				dir = &tree{}
				dt.add(elem, dir)
			}
		}
	}
}

type treeDir struct {
	name string
	node Node
}

type tree struct {
	dir []treeDir
}

func (t *tree) lookup(name string) Node {
	for _, d := range t.dir {
		if d.name == name {
			return d.node
		}
	}
	return nil
}

func (t *tree) add(name string, n Node) {
	t.dir = append(t.dir, treeDir{name, n})
}

func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0555
	return nil
}

func (t *tree) Lookup(ctx context.Context, name string) (Node, error) {
	n := t.lookup(name)
	if n != nil {
		return n, nil
	}
	return nil, fuse.ENOENT
}

func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	var out []fuse.Dirent
	for _, d := range t.dir {
		out = append(out, fuse.Dirent{Name: d.name})
	}
	return out, nil
}
vendor/bazil.org/fuse/fuse.go (generated, vendored, new file, 2304 lines)
File diff suppressed because it is too large
vendor/bazil.org/fuse/fuse_darwin.go (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
package fuse

// Maximum file write size we are prepared to receive from the kernel.
//
// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will
// forcibly close the /dev/fuse file descriptor on a Setxattr with a
// 16MB value. See TestSetxattr16MB and
// https://github.com/bazil/fuse/issues/42
const maxWrite = 16 * 1024 * 1024
vendor/bazil.org/fuse/fuse_freebsd.go (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
package fuse

// Maximum file write size we are prepared to receive from the kernel.
//
// This number is just a guess.
const maxWrite = 128 * 1024
vendor/bazil.org/fuse/fuse_kernel.go (generated, vendored, new file, 774 lines)
@@ -0,0 +1,774 @@
|
|||||||
|
// See the file LICENSE for copyright and licensing information.
|
||||||
|
|
||||||
|
// Derived from FUSE's fuse_kernel.h, which carries this notice:
|
||||||
|
/*
|
||||||
|
This file defines the kernel interface of FUSE
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
|
||||||
|
|
||||||
|
|
||||||
|
This -- and only this -- header file may also be distributed under
|
||||||
|
the terms of the BSD Licence as follows:
|
||||||
|
|
||||||
|
Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions
|
||||||
|
are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||||
|
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||||
|
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||||
|
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The FUSE version implemented by the package.
|
||||||
|
const (
|
||||||
|
protoVersionMinMajor = 7
|
||||||
|
protoVersionMinMinor = 8
|
||||||
|
protoVersionMaxMajor = 7
|
||||||
|
protoVersionMaxMinor = 12
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rootID = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
type kstatfs struct {
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail uint64
|
||||||
|
Files uint64
|
||||||
|
Ffree uint64
|
||||||
|
Bsize uint32
|
||||||
|
Namelen uint32
|
||||||
|
Frsize uint32
|
||||||
|
_ uint32
|
||||||
|
Spare [6]uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileLock struct {
|
||||||
|
Start uint64
|
||||||
|
End uint64
|
||||||
|
Type uint32
|
||||||
|
Pid uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetattrFlags are bit flags that can be seen in GetattrRequest.
|
||||||
|
type GetattrFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Indicates the handle is valid.
|
||||||
|
GetattrFh GetattrFlags = 1 << 0
|
||||||
|
)
|
||||||
|
|
||||||
|
var getattrFlagsNames = []flagName{
|
||||||
|
{uint32(GetattrFh), "GetattrFh"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl GetattrFlags) String() string {
|
||||||
|
return flagString(uint32(fl), getattrFlagsNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The SetattrValid are bit flags describing which fields in the SetattrRequest
|
||||||
|
// are included in the change.
|
||||||
|
type SetattrValid uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
SetattrMode SetattrValid = 1 << 0
|
||||||
|
SetattrUid SetattrValid = 1 << 1
|
||||||
|
SetattrGid SetattrValid = 1 << 2
|
||||||
|
SetattrSize SetattrValid = 1 << 3
|
||||||
|
SetattrAtime SetattrValid = 1 << 4
|
||||||
|
SetattrMtime SetattrValid = 1 << 5
|
||||||
|
SetattrHandle SetattrValid = 1 << 6
|
||||||
|
|
||||||
|
// Linux only(?)
|
||||||
|
SetattrAtimeNow SetattrValid = 1 << 7
|
||||||
|
SetattrMtimeNow SetattrValid = 1 << 8
|
||||||
|
SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
SetattrCrtime SetattrValid = 1 << 28
|
||||||
|
SetattrChgtime SetattrValid = 1 << 29
|
||||||
|
SetattrBkuptime SetattrValid = 1 << 30
|
||||||
|
SetattrFlags SetattrValid = 1 << 31
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 }
|
||||||
|
func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 }
|
||||||
|
func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 }
|
||||||
|
func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 }
|
||||||
|
func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 }
|
||||||
|
func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 }
|
||||||
|
func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 }
|
||||||
|
func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 }
|
||||||
|
func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 }
|
||||||
|
func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 }
|
||||||
|
func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 }
|
||||||
|
func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 }
|
||||||
|
func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 }
|
||||||
|
func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 }
|
||||||
|
|
||||||
|
func (fl SetattrValid) String() string {
|
||||||
|
return flagString(uint32(fl), setattrValidNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var setattrValidNames = []flagName{
|
||||||
|
{uint32(SetattrMode), "SetattrMode"},
|
||||||
|
{uint32(SetattrUid), "SetattrUid"},
|
||||||
|
{uint32(SetattrGid), "SetattrGid"},
|
||||||
|
{uint32(SetattrSize), "SetattrSize"},
|
||||||
|
{uint32(SetattrAtime), "SetattrAtime"},
|
||||||
|
{uint32(SetattrMtime), "SetattrMtime"},
|
||||||
|
{uint32(SetattrHandle), "SetattrHandle"},
|
||||||
|
{uint32(SetattrAtimeNow), "SetattrAtimeNow"},
|
||||||
|
{uint32(SetattrMtimeNow), "SetattrMtimeNow"},
|
||||||
|
{uint32(SetattrLockOwner), "SetattrLockOwner"},
|
||||||
|
{uint32(SetattrCrtime), "SetattrCrtime"},
|
||||||
|
{uint32(SetattrChgtime), "SetattrChgtime"},
|
||||||
|
{uint32(SetattrBkuptime), "SetattrBkuptime"},
|
||||||
|
{uint32(SetattrFlags), "SetattrFlags"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flags that can be seen in OpenRequest.Flags.
|
||||||
|
const (
|
||||||
|
// Access modes. These are not 1-bit flags, but alternatives where
|
||||||
|
// only one can be chosen. See the IsReadOnly etc convenience
|
||||||
|
// methods.
|
||||||
|
OpenReadOnly OpenFlags = syscall.O_RDONLY
|
||||||
|
OpenWriteOnly OpenFlags = syscall.O_WRONLY
|
||||||
|
OpenReadWrite OpenFlags = syscall.O_RDWR
|
||||||
|
|
||||||
|
// File was opened in append-only mode, all writes will go to end
|
||||||
|
// of file. OS X does not provide this information.
|
||||||
|
OpenAppend OpenFlags = syscall.O_APPEND
|
||||||
|
OpenCreate OpenFlags = syscall.O_CREAT
|
||||||
|
OpenDirectory OpenFlags = syscall.O_DIRECTORY
|
||||||
|
OpenExclusive OpenFlags = syscall.O_EXCL
|
||||||
|
OpenNonblock OpenFlags = syscall.O_NONBLOCK
|
||||||
|
OpenSync OpenFlags = syscall.O_SYNC
|
||||||
|
OpenTruncate OpenFlags = syscall.O_TRUNC
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenAccessModeMask is a bitmask that separates the access mode
|
||||||
|
// from the other flags in OpenFlags.
|
||||||
|
const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE
|
||||||
|
|
||||||
|
// OpenFlags are the O_FOO flags passed to open/create/etc calls. For
|
||||||
|
// example, os.O_WRONLY | os.O_APPEND.
|
||||||
|
type OpenFlags uint32
|
||||||
|
|
||||||
|
func (fl OpenFlags) String() string {
|
||||||
|
// O_RDONLY, O_RWONLY, O_RDWR are not flags
|
||||||
|
s := accModeName(fl & OpenAccessModeMask)
|
||||||
|
flags := uint32(fl &^ OpenAccessModeMask)
|
||||||
|
if flags != 0 {
|
||||||
|
s = s + "+" + flagString(flags, openFlagNames)
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenReadOnly is set.
|
||||||
|
func (fl OpenFlags) IsReadOnly() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenReadOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenWriteOnly is set.
|
||||||
|
func (fl OpenFlags) IsWriteOnly() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenWriteOnly
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return true if OpenReadWrite is set.
|
||||||
|
func (fl OpenFlags) IsReadWrite() bool {
|
||||||
|
return fl&OpenAccessModeMask == OpenReadWrite
|
||||||
|
}
|
||||||
|
|
||||||
|
func accModeName(flags OpenFlags) string {
|
||||||
|
switch flags {
|
||||||
|
case OpenReadOnly:
|
||||||
|
return "OpenReadOnly"
|
||||||
|
case OpenWriteOnly:
|
||||||
|
return "OpenWriteOnly"
|
||||||
|
case OpenReadWrite:
|
||||||
|
return "OpenReadWrite"
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var openFlagNames = []flagName{
|
||||||
|
{uint32(OpenAppend), "OpenAppend"},
|
||||||
|
{uint32(OpenCreate), "OpenCreate"},
|
||||||
|
{uint32(OpenDirectory), "OpenDirectory"},
|
||||||
|
{uint32(OpenExclusive), "OpenExclusive"},
|
||||||
|
{uint32(OpenNonblock), "OpenNonblock"},
|
||||||
|
{uint32(OpenSync), "OpenSync"},
|
||||||
|
{uint32(OpenTruncate), "OpenTruncate"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The OpenResponseFlags are returned in the OpenResponse.
|
||||||
|
type OpenResponseFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file
|
||||||
|
OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open
|
||||||
|
OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X)
|
||||||
|
|
||||||
|
OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X
|
||||||
|
OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl OpenResponseFlags) String() string {
|
||||||
|
return flagString(uint32(fl), openResponseFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var openResponseFlagNames = []flagName{
|
||||||
|
{uint32(OpenDirectIO), "OpenDirectIO"},
|
||||||
|
{uint32(OpenKeepCache), "OpenKeepCache"},
|
||||||
|
{uint32(OpenNonSeekable), "OpenNonSeekable"},
|
||||||
|
{uint32(OpenPurgeAttr), "OpenPurgeAttr"},
|
||||||
|
{uint32(OpenPurgeUBC), "OpenPurgeUBC"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The InitFlags are used in the Init exchange.
|
||||||
|
type InitFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
InitAsyncRead InitFlags = 1 << 0
|
||||||
|
InitPosixLocks InitFlags = 1 << 1
|
||||||
|
InitFileOps InitFlags = 1 << 2
|
||||||
|
InitAtomicTrunc InitFlags = 1 << 3
|
||||||
|
InitExportSupport InitFlags = 1 << 4
|
||||||
|
InitBigWrites InitFlags = 1 << 5
|
||||||
|
// Do not mask file access modes with umask. Not supported on OS X.
|
||||||
|
InitDontMask InitFlags = 1 << 6
|
||||||
|
InitSpliceWrite InitFlags = 1 << 7
|
||||||
|
InitSpliceMove InitFlags = 1 << 8
|
||||||
|
InitSpliceRead InitFlags = 1 << 9
|
||||||
|
InitFlockLocks InitFlags = 1 << 10
|
||||||
|
InitHasIoctlDir InitFlags = 1 << 11
|
||||||
|
InitAutoInvalData InitFlags = 1 << 12
|
||||||
|
InitDoReaddirplus InitFlags = 1 << 13
|
||||||
|
InitReaddirplusAuto InitFlags = 1 << 14
|
||||||
|
InitAsyncDIO InitFlags = 1 << 15
|
||||||
|
InitWritebackCache InitFlags = 1 << 16
|
||||||
|
InitNoOpenSupport InitFlags = 1 << 17
|
||||||
|
|
||||||
|
InitCaseSensitive InitFlags = 1 << 29 // OS X only
|
||||||
|
InitVolRename InitFlags = 1 << 30 // OS X only
|
||||||
|
InitXtimes InitFlags = 1 << 31 // OS X only
|
||||||
|
)
|
||||||
|
|
||||||
|
type flagName struct {
|
||||||
|
bit uint32
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
var initFlagNames = []flagName{
|
||||||
|
{uint32(InitAsyncRead), "InitAsyncRead"},
|
||||||
|
{uint32(InitPosixLocks), "InitPosixLocks"},
|
||||||
|
{uint32(InitFileOps), "InitFileOps"},
|
||||||
|
{uint32(InitAtomicTrunc), "InitAtomicTrunc"},
|
||||||
|
{uint32(InitExportSupport), "InitExportSupport"},
|
||||||
|
{uint32(InitBigWrites), "InitBigWrites"},
|
||||||
|
{uint32(InitDontMask), "InitDontMask"},
|
||||||
|
{uint32(InitSpliceWrite), "InitSpliceWrite"},
|
||||||
|
{uint32(InitSpliceMove), "InitSpliceMove"},
|
||||||
|
{uint32(InitSpliceRead), "InitSpliceRead"},
|
||||||
|
{uint32(InitFlockLocks), "InitFlockLocks"},
|
||||||
|
{uint32(InitHasIoctlDir), "InitHasIoctlDir"},
|
||||||
|
{uint32(InitAutoInvalData), "InitAutoInvalData"},
|
||||||
|
{uint32(InitDoReaddirplus), "InitDoReaddirplus"},
|
||||||
|
{uint32(InitReaddirplusAuto), "InitReaddirplusAuto"},
|
||||||
|
{uint32(InitAsyncDIO), "InitAsyncDIO"},
|
||||||
|
{uint32(InitWritebackCache), "InitWritebackCache"},
|
||||||
|
{uint32(InitNoOpenSupport), "InitNoOpenSupport"},
|
||||||
|
|
||||||
|
{uint32(InitCaseSensitive), "InitCaseSensitive"},
|
||||||
|
{uint32(InitVolRename), "InitVolRename"},
|
||||||
|
{uint32(InitXtimes), "InitXtimes"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl InitFlags) String() string {
|
||||||
|
return flagString(uint32(fl), initFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
func flagString(f uint32, names []flagName) string {
|
||||||
|
var s string
|
||||||
|
|
||||||
|
if f == 0 {
|
||||||
|
return "0"
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range names {
|
||||||
|
if f&n.bit != 0 {
|
||||||
|
s += "+" + n.name
|
||||||
|
f &^= n.bit
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f != 0 {
|
||||||
|
s += fmt.Sprintf("%+#x", f)
|
||||||
|
}
|
||||||
|
return s[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ReleaseFlags are used in the Release exchange.
|
||||||
|
type ReleaseFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
ReleaseFlush ReleaseFlags = 1 << 0
|
||||||
|
)
|
||||||
|
|
||||||
|
func (fl ReleaseFlags) String() string {
|
||||||
|
return flagString(uint32(fl), releaseFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
var releaseFlagNames = []flagName{
|
||||||
|
{uint32(ReleaseFlush), "ReleaseFlush"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Opcodes
|
||||||
|
const (
|
||||||
|
opLookup = 1
|
||||||
|
opForget = 2 // no reply
|
||||||
|
opGetattr = 3
|
||||||
|
opSetattr = 4
|
||||||
|
opReadlink = 5
|
||||||
|
opSymlink = 6
|
||||||
|
opMknod = 8
|
||||||
|
opMkdir = 9
|
||||||
|
opUnlink = 10
|
||||||
|
opRmdir = 11
|
||||||
|
opRename = 12
|
||||||
|
opLink = 13
|
||||||
|
opOpen = 14
|
||||||
|
opRead = 15
|
||||||
|
opWrite = 16
|
||||||
|
opStatfs = 17
|
||||||
|
opRelease = 18
|
||||||
|
opFsync = 20
|
||||||
|
opSetxattr = 21
|
||||||
|
opGetxattr = 22
|
||||||
|
opListxattr = 23
|
||||||
|
opRemovexattr = 24
|
||||||
|
opFlush = 25
|
||||||
|
opInit = 26
|
||||||
|
opOpendir = 27
|
||||||
|
opReaddir = 28
|
||||||
|
opReleasedir = 29
|
||||||
|
opFsyncdir = 30
|
||||||
|
opGetlk = 31
|
||||||
|
opSetlk = 32
|
||||||
|
opSetlkw = 33
|
||||||
|
opAccess = 34
|
||||||
|
opCreate = 35
|
||||||
|
opInterrupt = 36
|
||||||
|
opBmap = 37
|
||||||
|
opDestroy = 38
|
||||||
|
opIoctl = 39 // Linux?
|
||||||
|
opPoll = 40 // Linux?
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
opSetvolname = 61
|
||||||
|
opGetxtimes = 62
|
||||||
|
opExchange = 63
|
||||||
|
)
|
||||||
|
|
||||||
|
type entryOut struct {
|
||||||
|
Nodeid uint64 // Inode ID
|
||||||
|
Generation uint64 // Inode generation
|
||||||
|
EntryValid uint64 // Cache timeout for the name
|
||||||
|
AttrValid uint64 // Cache timeout for the attributes
|
||||||
|
EntryValidNsec uint32
|
||||||
|
AttrValidNsec uint32
|
||||||
|
Attr attr
|
||||||
|
}
|
||||||
|
|
||||||
|
func entryOutSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(entryOut{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type forgetIn struct {
|
||||||
|
Nlookup uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type getattrIn struct {
|
||||||
|
GetattrFlags uint32
|
||||||
|
_ uint32
|
||||||
|
Fh uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type attrOut struct {
|
||||||
|
AttrValid uint64 // Cache timeout for the attributes
|
||||||
|
AttrValidNsec uint32
|
||||||
|
_ uint32
|
||||||
|
Attr attr
|
||||||
|
}
|
||||||
|
|
||||||
|
func attrOutSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(attrOut{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
type getxtimesOut struct {
|
||||||
|
Bkuptime uint64
|
||||||
|
Crtime uint64
|
||||||
|
BkuptimeNsec uint32
|
||||||
|
CrtimeNsec uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type mknodIn struct {
|
||||||
|
Mode uint32
|
||||||
|
Rdev uint32
|
||||||
|
Umask uint32
|
||||||
|
_ uint32
|
||||||
|
// "filename\x00" follows.
|
||||||
|
}
|
||||||
|
|
||||||
|
func mknodInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(mknodIn{}.Umask)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(mknodIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type mkdirIn struct {
|
||||||
|
Mode uint32
|
||||||
|
Umask uint32
|
||||||
|
// filename follows
|
||||||
|
}
|
||||||
|
|
||||||
|
func mkdirInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(mkdirIn{}.Umask) + 4
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(mkdirIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type renameIn struct {
|
||||||
|
Newdir uint64
|
||||||
|
// "oldname\x00newname\x00" follows
|
||||||
|
}
|
||||||
|
|
||||||
|
// OS X
|
||||||
|
type exchangeIn struct {
|
||||||
|
Olddir uint64
|
||||||
|
Newdir uint64
|
||||||
|
Options uint64
|
||||||
|
// "oldname\x00newname\x00" follows
|
||||||
|
}
|
||||||
|
|
||||||
|
type linkIn struct {
|
||||||
|
Oldnodeid uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrInCommon struct {
|
||||||
|
Valid uint32
|
||||||
|
_ uint32
|
||||||
|
Fh uint64
|
||||||
|
Size uint64
|
||||||
|
LockOwner uint64 // unused on OS X?
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Unused2 uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
Unused3 uint32
|
||||||
|
Mode uint32
|
||||||
|
Unused4 uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Unused5 uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type openIn struct {
|
||||||
|
Flags uint32
|
||||||
|
Unused uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type openOut struct {
|
||||||
|
Fh uint64
|
||||||
|
OpenFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type createIn struct {
|
||||||
|
Flags uint32
|
||||||
|
Mode uint32
|
||||||
|
Umask uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func createInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 12}):
|
||||||
|
return unsafe.Offsetof(createIn{}.Umask)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(createIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type releaseIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Flags uint32
|
||||||
|
ReleaseFlags uint32
|
||||||
|
LockOwner uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type flushIn struct {
|
||||||
|
Fh uint64
|
||||||
|
FlushFlags uint32
|
||||||
|
_ uint32
|
||||||
|
LockOwner uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type readIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Offset uint64
|
||||||
|
Size uint32
|
||||||
|
ReadFlags uint32
|
||||||
|
LockOwner uint64
|
||||||
|
Flags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func readInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(readIn{}.ReadFlags) + 4
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(readIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ReadFlags are passed in ReadRequest.
|
||||||
|
type ReadFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// LockOwner field is valid.
|
||||||
|
ReadLockOwner ReadFlags = 1 << 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var readFlagNames = []flagName{
|
||||||
|
{uint32(ReadLockOwner), "ReadLockOwner"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl ReadFlags) String() string {
|
||||||
|
return flagString(uint32(fl), readFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Offset uint64
|
||||||
|
Size uint32
|
||||||
|
WriteFlags uint32
|
||||||
|
LockOwner uint64
|
||||||
|
Flags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(writeIn{}.LockOwner)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(writeIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type writeOut struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// The WriteFlags are passed in WriteRequest.
|
||||||
|
type WriteFlags uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
WriteCache WriteFlags = 1 << 0
|
||||||
|
// LockOwner field is valid.
|
||||||
|
WriteLockOwner WriteFlags = 1 << 1
|
||||||
|
)
|
||||||
|
|
||||||
|
var writeFlagNames = []flagName{
|
||||||
|
{uint32(WriteCache), "WriteCache"},
|
||||||
|
{uint32(WriteLockOwner), "WriteLockOwner"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fl WriteFlags) String() string {
|
||||||
|
return flagString(uint32(fl), writeFlagNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
const compatStatfsSize = 48
|
||||||
|
|
||||||
|
type statfsOut struct {
|
||||||
|
St kstatfs
|
||||||
|
}
|
||||||
|
|
||||||
|
type fsyncIn struct {
|
||||||
|
Fh uint64
|
||||||
|
FsyncFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrInCommon struct {
|
||||||
|
Size uint32
|
||||||
|
Flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (setxattrInCommon) position() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrInCommon struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (getxattrInCommon) position() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrOut struct {
|
||||||
|
Size uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type lkIn struct {
|
||||||
|
Fh uint64
|
||||||
|
Owner uint64
|
||||||
|
Lk fileLock
|
||||||
|
LkFlags uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func lkInSize(p Protocol) uintptr {
|
||||||
|
switch {
|
||||||
|
case p.LT(Protocol{7, 9}):
|
||||||
|
return unsafe.Offsetof(lkIn{}.LkFlags)
|
||||||
|
default:
|
||||||
|
return unsafe.Sizeof(lkIn{})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type lkOut struct {
|
||||||
|
Lk fileLock
|
||||||
|
}
|
||||||
|
|
||||||
|
type accessIn struct {
|
||||||
|
Mask uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type initIn struct {
|
||||||
|
Major uint32
|
||||||
|
Minor uint32
|
||||||
|
MaxReadahead uint32
|
||||||
|
Flags uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
const initInSize = int(unsafe.Sizeof(initIn{}))
|
||||||
|
|
||||||
|
type initOut struct {
|
||||||
|
Major uint32
|
||||||
|
Minor uint32
|
||||||
|
MaxReadahead uint32
|
||||||
|
Flags uint32
|
||||||
|
Unused uint32
|
||||||
|
MaxWrite uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type interruptIn struct {
|
||||||
|
Unique uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type bmapIn struct {
|
||||||
|
Block uint64
|
||||||
|
BlockSize uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type bmapOut struct {
|
||||||
|
Block uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type inHeader struct {
|
||||||
|
Len uint32
|
||||||
|
Opcode uint32
|
||||||
|
Unique uint64
|
||||||
|
Nodeid uint64
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Pid uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
const inHeaderSize = int(unsafe.Sizeof(inHeader{}))
|
||||||
|
|
||||||
|
type outHeader struct {
|
||||||
|
Len uint32
|
||||||
|
Error int32
|
||||||
|
Unique uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type dirent struct {
|
||||||
|
Ino uint64
|
||||||
|
Off uint64
|
||||||
|
Namelen uint32
|
||||||
|
Type uint32
|
||||||
|
Name [0]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
const direntSize = 8 + 8 + 4 + 4
|
||||||
|
|
||||||
|
const (
|
||||||
|
notifyCodePoll int32 = 1
|
||||||
|
notifyCodeInvalInode int32 = 2
|
||||||
|
notifyCodeInvalEntry int32 = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
type notifyInvalInodeOut struct {
|
||||||
|
Ino uint64
|
||||||
|
Off int64
|
||||||
|
Len int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type notifyInvalEntryOut struct {
|
||||||
|
Parent uint64
|
||||||
|
Namelen uint32
|
||||||
|
_ uint32
|
||||||
|
}
|
vendor/bazil.org/fuse/fuse_kernel_darwin.go (generated, vendored, new file, 88 lines)
@@ -0,0 +1,88 @@
|
|||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
Crtime_ uint64 // OS X only
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
CrtimeNsec uint32 // OS X only
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Flags_ uint32 // OS X only; see chflags(2)
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
a.Crtime_, a.CrtimeNsec = s, ns
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
a.Flags_ = f
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Bkuptime_ uint64
|
||||||
|
Chgtime_ uint64
|
||||||
|
Crtime uint64
|
||||||
|
BkuptimeNsec uint32
|
||||||
|
ChgtimeNsec uint32
|
||||||
|
CrtimeNsec uint32
|
||||||
|
Flags_ uint32 // see chflags(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return in.Flags_
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Position uint32
|
||||||
|
Padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *getxattrIn) position() uint32 {
|
||||||
|
return g.Position
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
|
||||||
|
// OS X only
|
||||||
|
Position uint32
|
||||||
|
Padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *setxattrIn) position() uint32 {
|
||||||
|
return s.Position
|
||||||
|
}
|
vendor/bazil.org/fuse/fuse_kernel_freebsd.go (generated, vendored, new file, 62 lines)
@@ -0,0 +1,62 @@
|
|||||||
|
package fuse
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) Crtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
// ignored on freebsd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
// ignored on freebsd
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
}
|
vendor/bazil.org/fuse/fuse_kernel_linux.go (generated, vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
|
|||||||
|
package fuse
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type attr struct {
|
||||||
|
Ino uint64
|
||||||
|
Size uint64
|
||||||
|
Blocks uint64
|
||||||
|
Atime uint64
|
||||||
|
Mtime uint64
|
||||||
|
Ctime uint64
|
||||||
|
AtimeNsec uint32
|
||||||
|
MtimeNsec uint32
|
||||||
|
CtimeNsec uint32
|
||||||
|
Mode uint32
|
||||||
|
Nlink uint32
|
||||||
|
Uid uint32
|
||||||
|
Gid uint32
|
||||||
|
Rdev uint32
|
||||||
|
Blksize uint32
|
||||||
|
padding uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) Crtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetCrtime(s uint64, ns uint32) {
|
||||||
|
// Ignored on Linux.
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *attr) SetFlags(f uint32) {
|
||||||
|
// Ignored on Linux.
|
||||||
|
}
|
||||||
|
|
||||||
|
type setattrIn struct {
|
||||||
|
setattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) BkupTime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Chgtime() time.Time {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (in *setattrIn) Flags() uint32 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func openFlags(flags uint32) OpenFlags {
|
||||||
|
// on amd64, the 32-bit O_LARGEFILE flag is always seen;
|
||||||
|
// on i386, the flag probably depends on the app
|
||||||
|
// requesting, but in any case should be utterly
|
||||||
|
// uninteresting to us here; our kernel protocol messages
|
||||||
|
// are not directly related to the client app's kernel
|
||||||
|
// API/ABI
|
||||||
|
flags &^= 0x8000
|
||||||
|
|
||||||
|
return OpenFlags(flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
type getxattrIn struct {
|
||||||
|
getxattrInCommon
|
||||||
|
}
|
||||||
|
|
||||||
|
type setxattrIn struct {
|
||||||
|
setxattrInCommon
|
||||||
|
}
|
vendor/bazil.org/fuse/fuse_kernel_std.go (generated, vendored, new file, 1 line)
@@ -0,0 +1 @@
package fuse
vendor/bazil.org/fuse/fuse_linux.go (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
package fuse

// Maximum file write size we are prepared to receive from the kernel.
//
// Linux 4.2.0 has been observed to cap this value at 128kB
// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages).
const maxWrite = 128 * 1024
vendor/bazil.org/fuse/fuseutil/fuseutil.go (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
package fuseutil // import "bazil.org/fuse/fuseutil"

import (
	"bazil.org/fuse"
)

// HandleRead handles a read request assuming that data is the entire file content.
// It adjusts the amount returned in resp according to req.Offset and req.Size.
func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) {
	if req.Offset >= int64(len(data)) {
		data = nil
	} else {
		data = data[req.Offset:]
	}
	if len(data) > req.Size {
		data = data[:req.Size]
	}
	n := copy(resp.Data[:req.Size], data)
	resp.Data = resp.Data[:n]
}
vendor/bazil.org/fuse/mount.go (generated, vendored, new file, 38 lines)
@@ -0,0 +1,38 @@
|
|||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrOSXFUSENotFound is returned from Mount when the OSXFUSE
|
||||||
|
// installation is not detected.
|
||||||
|
//
|
||||||
|
// Only happens on OS X. Make sure OSXFUSE is installed, or see
|
||||||
|
// OSXFUSELocations for customization.
|
||||||
|
ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE")
|
||||||
|
)
|
||||||
|
|
||||||
|
func neverIgnoreLine(line string) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(r)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
if ignore(line) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.Printf("%s: %s", prefix, line)
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
log.Printf("%s, error reading: %v", prefix, err)
|
||||||
|
}
|
||||||
|
}
|
vendor/bazil.org/fuse/mount_darwin.go (generated, vendored, new file, 208 lines)
@@ -0,0 +1,208 @@
|
|||||||
|
package fuse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errNoAvail = errors.New("no available fuse devices")
|
||||||
|
errNotLoaded = errors.New("osxfuse is not loaded")
|
||||||
|
)
|
||||||
|
|
||||||
|
func loadOSXFUSE(bin string) error {
|
||||||
|
cmd := exec.Command(bin)
|
||||||
|
cmd.Dir = "/"
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
err := cmd.Run()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func openOSXFUSEDev(devPrefix string) (*os.File, error) {
|
||||||
|
var f *os.File
|
||||||
|
var err error
|
||||||
|
for i := uint64(0); ; i++ {
|
||||||
|
path := devPrefix + strconv.FormatUint(i, 10)
|
||||||
|
f, err = os.OpenFile(path, os.O_RDWR, 0000)
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
if i == 0 {
|
||||||
|
// not even the first device was found -> fuse is not loaded
|
||||||
|
return nil, errNotLoaded
|
||||||
|
}
|
||||||
|
|
||||||
|
// we've run out of kernel-provided devices
|
||||||
|
return nil, errNoAvail
|
||||||
|
}
|
||||||
|
|
||||||
|
if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY {
|
||||||
|
// try the next one
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) {
|
||||||
|
var noMountpointPrefix = helperName + `: `
|
||||||
|
const noMountpointSuffix = `: No such file or directory`
|
||||||
|
return func(line string) (ignore bool) {
|
||||||
|
if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
|
||||||
|
// re-extract it from the error message in case some layer
|
||||||
|
// changed the path
|
||||||
|
mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
|
||||||
|
err := &MountpointDoesNotExistError{
|
||||||
|
Path: mountpoint,
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case errCh <- err:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
// not the first error; fall back to logging it
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isBoringMountOSXFUSEError returns whether the Wait error is
|
||||||
|
// uninteresting; exit status 64 is.
|
||||||
|
func isBoringMountOSXFUSEError(err error) bool {
|
||||||
|
if err, ok := err.(*exec.ExitError); ok && err.Exited() {
|
||||||
|
if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error {
|
||||||
|
for k, v := range conf.options {
|
||||||
|
if strings.Contains(k, ",") || strings.Contains(v, ",") {
|
||||||
|
// Silly limitation but the mount helper does not
|
||||||
|
// understand any escaping. See TestMountOptionCommaError.
|
||||||
|
return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cmd := exec.Command(
|
||||||
|
bin,
|
||||||
|
"-o", conf.getOptions(),
|
||||||
|
// Tell osxfuse-kext how large our buffer is. It must split
|
||||||
|
// writes larger than this into multiple writes.
|
||||||
|
//
|
||||||
|
// OSXFUSE seems to ignore InitResponse.MaxWrite, and uses
|
||||||
|
// this instead.
|
||||||
|
"-o", "iosize="+strconv.FormatUint(maxWrite, 10),
|
||||||
|
// refers to fd passed in cmd.ExtraFiles
|
||||||
|
"3",
|
||||||
|
dir,
|
||||||
|
)
|
||||||
|
cmd.ExtraFiles = []*os.File{f}
|
||||||
|
cmd.Env = os.Environ()
|
||||||
|
// OSXFUSE <3.3.0
|
||||||
|
cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=")
|
||||||
|
// OSXFUSE >=3.3.0
|
||||||
|
cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=")
|
||||||
|
|
||||||
|
daemon := os.Args[0]
|
||||||
|
if daemonVar != "" {
|
||||||
|
cmd.Env = append(cmd.Env, daemonVar+"="+daemon)
|
||||||
|
}
|
||||||
|
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
stderr, err := cmd.StderrPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return fmt.Errorf("mount_osxfusefs: %v", err)
|
||||||
|
}
|
||||||
|
helperErrCh := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
|
||||||
|
helperName := path.Base(bin)
|
||||||
|
go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr)
|
||||||
|
wg.Wait()
|
||||||
|
if err := cmd.Wait(); err != nil {
|
||||||
|
// see if we have a better error to report
|
||||||
|
select {
|
||||||
|
case helperErr := <-helperErrCh:
|
||||||
|
// log the Wait error if it's not what we expected
|
||||||
|
if !isBoringMountOSXFUSEError(err) {
|
||||||
|
log.Printf("mount helper failed: %v", err)
|
||||||
|
}
|
||||||
|
// and now return what we grabbed from stderr as the real
|
||||||
|
// error
|
||||||
|
*errp = helperErr
|
||||||
|
close(ready)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// nope, fall back to generic message
|
||||||
|
}
|
||||||
|
|
||||||
|
*errp = fmt.Errorf("mount_osxfusefs: %v", err)
|
||||||
|
close(ready)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
*errp = nil
|
||||||
|
close(ready)
|
||||||
|
}()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
|
||||||
|
locations := conf.osxfuseLocations
|
||||||
|
if locations == nil {
|
||||||
|
locations = []OSXFUSEPaths{
|
||||||
|
OSXFUSELocationV3,
|
||||||
|
OSXFUSELocationV2,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, loc := range locations {
|
||||||
|
if _, err := os.Stat(loc.Mount); os.IsNotExist(err) {
|
||||||
|
// try the other locations
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := openOSXFUSEDev(loc.DevicePrefix)
|
||||||
|
if err == errNotLoaded {
|
||||||
|
err = loadOSXFUSE(loc.Load)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// try again
|
||||||
|
f, err = openOSXFUSEDev(loc.DevicePrefix)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp)
|
||||||
|
if err != nil {
|
||||||
|
f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
return nil, ErrOSXFUSENotFound
|
||||||
|
}
|
111 vendor/bazil.org/fuse/mount_freebsd.go generated vendored Normal file
@ -0,0 +1,111 @@
package fuse

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"strings"
	"sync"
	"syscall"
)

func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) {
	return func(line string) (ignore bool) {
		const (
			noMountpointPrefix = `mount_fusefs: `
			noMountpointSuffix = `: No such file or directory`
		)
		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
			// re-extract it from the error message in case some layer
			// changed the path
			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
			err := &MountpointDoesNotExistError{
				Path: mountpoint,
			}
			select {
			case errCh <- err:
				return true
			default:
				// not the first error; fall back to logging it
				return false
			}
		}

		return false
	}
}

// isBoringMountFusefsError returns whether the Wait error is
// uninteresting; exit status 1 is.
func isBoringMountFusefsError(err error) bool {
	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
			return true
		}
	}
	return false
}

func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) {
	for k, v := range conf.options {
		if strings.Contains(k, ",") || strings.Contains(v, ",") {
			// Silly limitation but the mount helper does not
			// understand any escaping. See TestMountOptionCommaError.
			return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v)
		}
	}

	f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000)
	if err != nil {
		*errp = err
		return nil, err
	}

	cmd := exec.Command(
		"/sbin/mount_fusefs",
		"--safe",
		"-o", conf.getOptions(),
		"3",
		dir,
	)
	cmd.ExtraFiles = []*os.File{f}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("mount_fusefs: %v", err)
	}
	helperErrCh := make(chan error, 1)
	var wg sync.WaitGroup
	wg.Add(2)
	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
	go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr)
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		// see if we have a better error to report
		select {
		case helperErr := <-helperErrCh:
			// log the Wait error if it's not what we expected
			if !isBoringMountFusefsError(err) {
				log.Printf("mount helper failed: %v", err)
			}
			// and now return what we grabbed from stderr as the real
			// error
			return nil, helperErr
		default:
			// nope, fall back to generic message
		}
		return nil, fmt.Errorf("mount_fusefs: %v", err)
	}

	close(ready)
	return f, nil
}
150 vendor/bazil.org/fuse/mount_linux.go generated vendored Normal file
@ -0,0 +1,150 @@
package fuse

import (
	"fmt"
	"log"
	"net"
	"os"
	"os/exec"
	"strings"
	"sync"
	"syscall"
)

func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) {
	return func(line string) (ignore bool) {
		if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` {
			// Silence this particular message, it occurs way too
			// commonly and isn't very relevant to whether the mount
			// succeeds or not.
			return true
		}

		const (
			noMountpointPrefix = `fusermount: failed to access mountpoint `
			noMountpointSuffix = `: No such file or directory`
		)
		if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) {
			// re-extract it from the error message in case some layer
			// changed the path
			mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)]
			err := &MountpointDoesNotExistError{
				Path: mountpoint,
			}
			select {
			case errCh <- err:
				return true
			default:
				// not the first error; fall back to logging it
				return false
			}
		}

		return false
	}
}

// isBoringFusermountError returns whether the Wait error is
// uninteresting; exit status 1 is.
func isBoringFusermountError(err error) bool {
	if err, ok := err.(*exec.ExitError); ok && err.Exited() {
		if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 {
			return true
		}
	}
	return false
}

func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) {
	// linux mount is never delayed
	close(ready)

	fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)
	if err != nil {
		return nil, fmt.Errorf("socketpair error: %v", err)
	}

	writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes")
	defer writeFile.Close()

	readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads")
	defer readFile.Close()

	cmd := exec.Command(
		"fusermount",
		"-o", conf.getOptions(),
		"--",
		dir,
	)
	cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3")

	cmd.ExtraFiles = []*os.File{writeFile}

	var wg sync.WaitGroup
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("setting up fusermount stderr: %v", err)
	}

	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("fusermount: %v", err)
	}
	helperErrCh := make(chan error, 1)
	wg.Add(2)
	go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout)
	go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr)
	wg.Wait()
	if err := cmd.Wait(); err != nil {
		// see if we have a better error to report
		select {
		case helperErr := <-helperErrCh:
			// log the Wait error if it's not what we expected
			if !isBoringFusermountError(err) {
				log.Printf("mount helper failed: %v", err)
			}
			// and now return what we grabbed from stderr as the real
			// error
			return nil, helperErr
		default:
			// nope, fall back to generic message
		}

		return nil, fmt.Errorf("fusermount: %v", err)
	}

	c, err := net.FileConn(readFile)
	if err != nil {
		return nil, fmt.Errorf("FileConn from fusermount socket: %v", err)
	}
	defer c.Close()

	uc, ok := c.(*net.UnixConn)
	if !ok {
		return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c)
	}

	buf := make([]byte, 32) // expect 1 byte
	oob := make([]byte, 32) // expect 24 bytes
	_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
	scms, err := syscall.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return nil, fmt.Errorf("ParseSocketControlMessage: %v", err)
	}
	if len(scms) != 1 {
		return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms)
	}
	scm := scms[0]
	gotFds, err := syscall.ParseUnixRights(&scm)
	if err != nil {
		return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err)
	}
	if len(gotFds) != 1 {
		return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds)
	}
	f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse")
	return f, nil
}
310 vendor/bazil.org/fuse/options.go generated vendored Normal file
@ -0,0 +1,310 @@
package fuse

import (
	"errors"
	"strings"
)

func dummyOption(conf *mountConfig) error {
	return nil
}

// mountConfig holds the configuration for a mount operation.
// Use it by passing MountOption values to Mount.
type mountConfig struct {
	options          map[string]string
	maxReadahead     uint32
	initFlags        InitFlags
	osxfuseLocations []OSXFUSEPaths
}

func escapeComma(s string) string {
	s = strings.Replace(s, `\`, `\\`, -1)
	s = strings.Replace(s, `,`, `\,`, -1)
	return s
}

// getOptions makes a string of options suitable for passing to FUSE
// mount flag `-o`. Returns an empty string if no options were set.
// Any platform specific adjustments should happen before the call.
func (m *mountConfig) getOptions() string {
	var opts []string
	for k, v := range m.options {
		k = escapeComma(k)
		if v != "" {
			k += "=" + escapeComma(v)
		}
		opts = append(opts, k)
	}
	return strings.Join(opts, ",")
}

type mountOption func(*mountConfig) error

// MountOption is passed to Mount to change the behavior of the mount.
type MountOption mountOption

// FSName sets the file system name (also called source) that is
// visible in the list of mounted file systems.
//
// FreeBSD ignores this option.
func FSName(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["fsname"] = name
		return nil
	}
}

// Subtype sets the subtype of the mount. The main type is always
// `fuse`. The type in a list of mounted file systems will look like
// `fuse.foo`.
//
// OS X ignores this option.
// FreeBSD ignores this option.
func Subtype(fstype string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["subtype"] = fstype
		return nil
	}
}

// LocalVolume sets the volume to be local (instead of network),
// changing the behavior of Finder, Spotlight, and such.
//
// OS X only. Others ignore this option.
func LocalVolume() MountOption {
	return localVolume
}

// VolumeName sets the volume name shown in Finder.
//
// OS X only. Others ignore this option.
func VolumeName(name string) MountOption {
	return volumeName(name)
}

// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
// to store extended attributes on file systems that do not support
// them natively.
//
// Such file names are:
//
// ._*
// .DS_Store
//
// OS X only. Others ignore this option.
func NoAppleDouble() MountOption {
	return noAppleDouble
}

// NoAppleXattr makes OSXFUSE disallow extended attributes with the
// prefix "com.apple.". This disables persistent Finder state and
// other such information.
//
// OS X only. Others ignore this option.
func NoAppleXattr() MountOption {
	return noAppleXattr
}

// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates,
// i.e. create calls for which the initiator explicitly set the O_EXCL flag.
//
// OSXFUSE expects all create calls to return EEXIST in case the file
// already exists, regardless of whether O_EXCL was specified or not.
// To ensure this behavior, it normally sets OpenExclusive for all
// Create calls, regardless of whether the original call had it set.
// For distributed filesystems, that may force every file create to be
// a distributed consensus action, causing undesirable delays.
//
// This option makes the FUSE filesystem see the original flag value,
// and better decide when to ensure global consensus.
//
// Note that returning EEXIST on existing file create is still
// expected with OSXFUSE, regardless of the presence of the
// OpenExclusive flag.
//
// For more information, see
// https://github.com/osxfuse/osxfuse/issues/209
//
// OS X only. Others ignore this options.
// Requires OSXFUSE 3.4.1 or newer.
func ExclCreate() MountOption {
	return exclCreate
}

// DaemonTimeout sets the time in seconds between a request and a reply before
// the FUSE mount is declared dead.
//
// OS X and FreeBSD only. Others ignore this option.
func DaemonTimeout(name string) MountOption {
	return daemonTimeout(name)
}

var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot")

// AllowOther allows other users to access the file system.
//
// Only one of AllowOther or AllowRoot can be used.
func AllowOther() MountOption {
	return func(conf *mountConfig) error {
		if _, ok := conf.options["allow_root"]; ok {
			return ErrCannotCombineAllowOtherAndAllowRoot
		}
		conf.options["allow_other"] = ""
		return nil
	}
}

// AllowRoot allows other users to access the file system.
//
// Only one of AllowOther or AllowRoot can be used.
//
// FreeBSD ignores this option.
func AllowRoot() MountOption {
	return func(conf *mountConfig) error {
		if _, ok := conf.options["allow_other"]; ok {
			return ErrCannotCombineAllowOtherAndAllowRoot
		}
		conf.options["allow_root"] = ""
		return nil
	}
}

// AllowDev enables interpreting character or block special devices on the
// filesystem.
func AllowDev() MountOption {
	return func(conf *mountConfig) error {
		conf.options["dev"] = ""
		return nil
	}
}

// AllowSUID allows set-user-identifier or set-group-identifier bits to take
// effect.
func AllowSUID() MountOption {
	return func(conf *mountConfig) error {
		conf.options["suid"] = ""
		return nil
	}
}

// DefaultPermissions makes the kernel enforce access control based on
// the file mode (as in chmod).
//
// Without this option, the Node itself decides what is and is not
// allowed. This is normally ok because FUSE file systems cannot be
// accessed by other users without AllowOther/AllowRoot.
//
// FreeBSD ignores this option.
func DefaultPermissions() MountOption {
	return func(conf *mountConfig) error {
		conf.options["default_permissions"] = ""
		return nil
	}
}

// ReadOnly makes the mount read-only.
func ReadOnly() MountOption {
	return func(conf *mountConfig) error {
		conf.options["ro"] = ""
		return nil
	}
}

// MaxReadahead sets the number of bytes that can be prefetched for
// sequential reads. The kernel can enforce a maximum value lower than
// this.
//
// This setting makes the kernel perform speculative reads that do not
// originate from any client process. This usually tremendously
// improves read performance.
func MaxReadahead(n uint32) MountOption {
	return func(conf *mountConfig) error {
		conf.maxReadahead = n
		return nil
	}
}

// AsyncRead enables multiple outstanding read requests for the same
// handle. Without this, there is at most one request in flight at a
// time.
func AsyncRead() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitAsyncRead
		return nil
	}
}

// WritebackCache enables the kernel to buffer writes before sending
// them to the FUSE server. Without this, writethrough caching is
// used.
func WritebackCache() MountOption {
	return func(conf *mountConfig) error {
		conf.initFlags |= InitWritebackCache
		return nil
	}
}

// OSXFUSEPaths describes the paths used by an installed OSXFUSE
// version. See OSXFUSELocationV3 for typical values.
type OSXFUSEPaths struct {
	// Prefix for the device file. At mount time, an incrementing
	// number is suffixed until a free FUSE device is found.
	DevicePrefix string
	// Path of the load helper, used to load the kernel extension if
	// no device files are found.
	Load string
	// Path of the mount helper, used for the actual mount operation.
	Mount string
	// Environment variable used to pass the path to the executable
	// calling the mount helper.
	DaemonVar string
}

// Default paths for OSXFUSE. See OSXFUSELocations.
var (
	OSXFUSELocationV3 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse",
		Mount:        "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse",
		DaemonVar:    "MOUNT_OSXFUSE_DAEMON_PATH",
	}
	OSXFUSELocationV2 = OSXFUSEPaths{
		DevicePrefix: "/dev/osxfuse",
		Load:         "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs",
		Mount:        "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs",
		DaemonVar:    "MOUNT_FUSEFS_DAEMON_PATH",
	}
)

// OSXFUSELocations sets where to look for OSXFUSE files. The
// arguments are all the possible locations. The previous locations
// are replaced.
//
// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are
// used.
//
// OS X only. Others ignore this option.
func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption {
	return func(conf *mountConfig) error {
		if len(paths) == 0 {
			return errors.New("must specify at least one location for OSXFUSELocations")
		}
		// replace previous values, but make a copy so there's no
		// worries about caller mutating their slice
		conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...)
		return nil
	}
}

// AllowNonEmptyMount allows the mounting over a non-empty directory.
//
// The files in it will be shadowed by the freshly created mount. By
// default these mounts are rejected to prevent accidental covering up
// of data, which could for example prevent automatic backup.
func AllowNonEmptyMount() MountOption {
	return func(conf *mountConfig) error {
		conf.options["nonempty"] = ""
		return nil
	}
}
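Each option above is a plain closure over the unexported mountConfig, so callers compose them variadically. A minimal sketch of how a caller might combine them, assuming the package's usual `fuse.Mount(dir string, options ...MountOption) (*Conn, error)` entry point and `Conn` type, which are not part of the hunks shown here:

```go
package main

import (
	"log"

	"bazil.org/fuse"
)

func main() {
	// Each argument is a MountOption that mutates the internal mountConfig
	// before the platform-specific mount helper is invoked.
	conn, err := fuse.Mount(
		"/mnt/demo", // hypothetical mountpoint for illustration
		fuse.FSName("demofs"),
		fuse.Subtype("demofs"),
		fuse.ReadOnly(),
		fuse.MaxReadahead(128*1024),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	defer fuse.Unmount("/mnt/demo")
}
```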
35 vendor/bazil.org/fuse/options_darwin.go generated vendored Normal file
@ -0,0 +1,35 @@
package fuse

func localVolume(conf *mountConfig) error {
	conf.options["local"] = ""
	return nil
}

func volumeName(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["volname"] = name
		return nil
	}
}

func daemonTimeout(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["daemon_timeout"] = name
		return nil
	}
}

func noAppleXattr(conf *mountConfig) error {
	conf.options["noapplexattr"] = ""
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	conf.options["noappledouble"] = ""
	return nil
}

func exclCreate(conf *mountConfig) error {
	conf.options["excl_create"] = ""
	return nil
}
28 vendor/bazil.org/fuse/options_freebsd.go generated vendored Normal file
@ -0,0 +1,28 @@
package fuse

func localVolume(conf *mountConfig) error {
	return nil
}

func volumeName(name string) MountOption {
	return dummyOption
}

func daemonTimeout(name string) MountOption {
	return func(conf *mountConfig) error {
		conf.options["timeout"] = name
		return nil
	}
}

func noAppleXattr(conf *mountConfig) error {
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	return nil
}

func exclCreate(conf *mountConfig) error {
	return nil
}
25 vendor/bazil.org/fuse/options_linux.go generated vendored Normal file
@ -0,0 +1,25 @@
package fuse

func localVolume(conf *mountConfig) error {
	return nil
}

func volumeName(name string) MountOption {
	return dummyOption
}

func daemonTimeout(name string) MountOption {
	return dummyOption
}

func noAppleXattr(conf *mountConfig) error {
	return nil
}

func noAppleDouble(conf *mountConfig) error {
	return nil
}

func exclCreate(conf *mountConfig) error {
	return nil
}
75 vendor/bazil.org/fuse/protocol.go generated vendored Normal file
@ -0,0 +1,75 @@
package fuse

import (
	"fmt"
)

// Protocol is a FUSE protocol version number.
type Protocol struct {
	Major uint32
	Minor uint32
}

func (p Protocol) String() string {
	return fmt.Sprintf("%d.%d", p.Major, p.Minor)
}

// LT returns whether a is less than b.
func (a Protocol) LT(b Protocol) bool {
	return a.Major < b.Major ||
		(a.Major == b.Major && a.Minor < b.Minor)
}

// GE returns whether a is greater than or equal to b.
func (a Protocol) GE(b Protocol) bool {
	return a.Major > b.Major ||
		(a.Major == b.Major && a.Minor >= b.Minor)
}

func (a Protocol) is79() bool {
	return a.GE(Protocol{7, 9})
}

// HasAttrBlockSize returns whether Attr.BlockSize is respected by the
// kernel.
func (a Protocol) HasAttrBlockSize() bool {
	return a.is79()
}

// HasReadWriteFlags returns whether ReadRequest/WriteRequest
// fields Flags and FileFlags are valid.
func (a Protocol) HasReadWriteFlags() bool {
	return a.is79()
}

// HasGetattrFlags returns whether GetattrRequest field Flags is
// valid.
func (a Protocol) HasGetattrFlags() bool {
	return a.is79()
}

func (a Protocol) is710() bool {
	return a.GE(Protocol{7, 10})
}

// HasOpenNonSeekable returns whether OpenResponse field Flags flag
// OpenNonSeekable is supported.
func (a Protocol) HasOpenNonSeekable() bool {
	return a.is710()
}

func (a Protocol) is712() bool {
	return a.GE(Protocol{7, 12})
}

// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest
// field Umask is valid.
func (a Protocol) HasUmask() bool {
	return a.is712()
}

// HasInvalidate returns whether InvalidateNode/InvalidateEntry are
// supported.
func (a Protocol) HasInvalidate() bool {
	return a.is712()
}
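The feature checks above are just lexicographic comparisons on the negotiated (Major, Minor) pair. A small self-contained sketch of that gating logic, re-declaring a local mirror of the Protocol type only so it compiles on its own (the real negotiated version would come from the kernel INIT exchange, which is not part of this hunk):

```go
package main

import "fmt"

// protocol mirrors the Protocol struct shown above, for illustration only.
type protocol struct{ Major, Minor uint32 }

// ge is the same lexicographic comparison as Protocol.GE above.
func (a protocol) ge(b protocol) bool {
	return a.Major > b.Major || (a.Major == b.Major && a.Minor >= b.Minor)
}

func main() {
	negotiated := protocol{7, 10} // hypothetical result of version negotiation
	fmt.Println(negotiated.ge(protocol{7, 9}))  // true: read/write flags available
	fmt.Println(negotiated.ge(protocol{7, 12})) // false: Umask field not valid yet
}
```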
6 vendor/bazil.org/fuse/unmount.go generated vendored Normal file
@ -0,0 +1,6 @@
package fuse

// Unmount tries to unmount the filesystem mounted at dir.
func Unmount(dir string) error {
	return unmount(dir)
}
21 vendor/bazil.org/fuse/unmount_linux.go generated vendored Normal file
@ -0,0 +1,21 @@
package fuse

import (
	"bytes"
	"errors"
	"os/exec"
)

func unmount(dir string) error {
	cmd := exec.Command("fusermount", "-u", dir)
	output, err := cmd.CombinedOutput()
	if err != nil {
		if len(output) > 0 {
			output = bytes.TrimRight(output, "\n")
			msg := err.Error() + ": " + string(output)
			err = errors.New(msg)
		}
		return err
	}
	return nil
}
17 vendor/bazil.org/fuse/unmount_std.go generated vendored Normal file
@ -0,0 +1,17 @@
// +build !linux

package fuse

import (
	"os"
	"syscall"
)

func unmount(dir string) error {
	err := syscall.Unmount(dir, 0)
	if err != nil {
		err = &os.PathError{Op: "unmount", Path: dir, Err: err}
		return err
	}
	return nil
}
1 vendor/github.com/AndreasBriese/bbloom/.travis.yml generated vendored Normal file
@ -0,0 +1 @@
language: go
35 vendor/github.com/AndreasBriese/bbloom/LICENSE generated vendored Normal file
@ -0,0 +1,35 @@
bbloom.go

// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt

// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

siphash.go

// https://github.com/dchest/siphash
//
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
131 vendor/github.com/AndreasBriese/bbloom/README.md generated vendored Normal file
@ -0,0 +1,131 @@
## bbloom: a bitset Bloom filter for go/golang
===

[![Build Status](https://travis-ci.org/AndreasBriese/bbloom.png?branch=master)](http://travis-ci.org/AndreasBriese/bbloom)

package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.

NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom

===

changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.

This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
Nonetheless bbloom should work with any other form of entries.

~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~

Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash )

Minimum hashset size is: 512 ([4]uint64; will be set automatically).

###install

```sh
go get github.com/AndreasBriese/bbloom
```

###test
+ change to folder ../bbloom
+ create wordlist in file "words.txt" (you might use `python permut.py`)
+ run 'go test -bench=.' within the folder

```go
go test -bench=.
```

~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~

using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively)

### usage

after installation add

```go
import (
	...
	"github.com/AndreasBriese/bbloom"
	...
)
```

at your header. In the program use

```go
// create a bloom filter for 65536 items and 1 % wrong-positive ratio
bf := bbloom.New(float64(1<<16), float64(0.01))

// or
// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
// bf = bbloom.New(float64(650000), float64(7))
// or
bf = bbloom.New(650000.0, 7.0)

// add one item
bf.Add([]byte("butter"))

// Number of elements added is exposed now
// Note: ElemNum will not be included in JSON export (for compatability to older version)
nOfElementsInFilter := bf.ElemNum

// check if item is in the filter
isIn := bf.Has([]byte("butter"))    // should be true
isNotIn := bf.Has([]byte("Butter")) // should be false

// 'add only if item is new' to the bloomfilter
added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
added = bf.AddIfNotHas([]byte("buTTer"))  // should be true because 'buTTer' is new

// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
// add one item
bf.AddTS([]byte("peanutbutter"))
// check if item is in the filter
isIn = bf.HasTS([]byte("peanutbutter"))    // should be true
isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
// 'add only if item is new' to the bloomfilter
added = bf.AddIfNotHasTS([]byte("butter"))       // should be false because 'peanutbutter' is already in the set
added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new

// convert to JSON ([]byte)
Json := bf.JSONMarshal()

// bloomfilters Mutex is exposed for external un-/locking
// i.e. mutex lock while doing JSON conversion
bf.Mtx.Lock()
Json = bf.JSONMarshal()
bf.Mtx.Unlock()

// restore a bloom filter from storage
bfNew := bbloom.JSONUnmarshal(Json)

isInNew := bfNew.Has([]byte("butter"))    // should be true
isNotInNew := bfNew.Has([]byte("Butter")) // should be false

```

to work with the bloom filter.

### why 'fast'?

It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint:

Bloom filter (filter size 524288, 7 hashlocs)
github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)

github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)

(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)

With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
270 vendor/github.com/AndreasBriese/bbloom/bbloom.go generated vendored Normal file
@ -0,0 +1,270 @@
// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt

// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package bbloom

import (
	"bytes"
	"encoding/json"
	"log"
	"math"
	"sync"
	"unsafe"
)

// helper
var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}

func getSize(ui64 uint64) (size uint64, exponent uint64) {
	if ui64 < uint64(512) {
		ui64 = uint64(512)
	}
	size = uint64(1)
	for size < ui64 {
		size <<= 1
		exponent++
	}
	return size, exponent
}

func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
	size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
	locs := math.Ceil(float64(0.69314718056) * size / numEntries)
	return uint64(size), uint64(locs)
}

// New
// returns a new bloomfilter
func New(params ...float64) (bloomfilter Bloom) {
	var entries, locs uint64
	if len(params) == 2 {
		if params[1] < 1 {
			entries, locs = calcSizeByWrongPositives(params[0], params[1])
		} else {
			entries, locs = uint64(params[0]), uint64(params[1])
		}
	} else {
		log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
	}
	size, exponent := getSize(uint64(entries))
	bloomfilter = Bloom{
		sizeExp: exponent,
		size:    size - 1,
		setLocs: locs,
		shift:   64 - exponent,
	}
	bloomfilter.Size(size)
	return bloomfilter
}

// NewWithBoolset
// takes a []byte slice and number of locs per entry
// returns the bloomfilter with a bitset populated according to the input []byte
func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
	bloomfilter = New(float64(len(*bs)<<3), float64(locs))
	ptr := uintptr(unsafe.Pointer(&bloomfilter.bitset[0]))
	for _, b := range *bs {
		*(*uint8)(unsafe.Pointer(ptr)) = b
		ptr++
	}
	return bloomfilter
}

// bloomJSONImExport
// Im/Export structure used by JSONMarshal / JSONUnmarshal
type bloomJSONImExport struct {
	FilterSet []byte
	SetLocs   uint64
}

// JSONUnmarshal
// takes JSON-Object (type bloomJSONImExport) as []bytes
// returns bloom32 / bloom64 object
func JSONUnmarshal(dbData []byte) Bloom {
	bloomImEx := bloomJSONImExport{}
	json.Unmarshal(dbData, &bloomImEx)
	buf := bytes.NewBuffer(bloomImEx.FilterSet)
	bs := buf.Bytes()
	bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
	return bf
}

//
// Bloom filter
type Bloom struct {
	Mtx     sync.Mutex
	ElemNum uint64
	bitset  []uint64
	sizeExp uint64
	size    uint64
	setLocs uint64
	shift   uint64
}

// <--- http://www.cse.yorku.ca/~oz/hash.html
// modified Berkeley DB Hash (32bit)
// hash is casted to l, h = 16bit fragments
// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
// 	hash := uint64(len(*b))
// 	for _, c := range *b {
// 		hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
// 	}
// 	h = hash >> bl.shift
// 	l = hash << bl.shift >> bl.shift
// 	return l, h
// }

// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
// https://131002.net/siphash/
// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash

// Add
// set the bit(s) for entry; Adds an entry to the Bloom filter
func (bl *Bloom) Add(entry []byte) {
	l, h := bl.sipHash(entry)
	for i := uint64(0); i < (*bl).setLocs; i++ {
		(*bl).Set((h + i*l) & (*bl).size)
		(*bl).ElemNum++
	}
}

// AddTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) AddTS(entry []byte) {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	bl.Add(entry[:])
}

// Has
// check if bit(s) for entry is/are set
// returns true if the entry was added to the Bloom Filter
func (bl Bloom) Has(entry []byte) bool {
	l, h := bl.sipHash(entry)
	for i := uint64(0); i < bl.setLocs; i++ {
		switch bl.IsSet((h + i*l) & bl.size) {
		case false:
			return false
		}
	}
	return true
}

// HasTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) HasTS(entry []byte) bool {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	return bl.Has(entry[:])
}

// AddIfNotHas
// Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was allready registered in the bloomfilter
func (bl Bloom) AddIfNotHas(entry []byte) (added bool) {
	if bl.Has(entry[:]) {
		return added
	}
	bl.Add(entry[:])
	return true
}

// AddIfNotHasTS
// Tread safe: Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was allready registered in the bloomfilter
func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	return bl.AddIfNotHas(entry[:])
}

// Size
// make Bloom filter with as bitset of size sz
func (bl *Bloom) Size(sz uint64) {
	(*bl).bitset = make([]uint64, sz>>6)
}

// Clear
// resets the Bloom filter
func (bl *Bloom) Clear() {
	for i, _ := range (*bl).bitset {
		(*bl).bitset[i] = 0
	}
}

// Set
// set the bit[idx] of bitsit
func (bl *Bloom) Set(idx uint64) {
	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
	*(*uint8)(ptr) |= mask[idx%8]
}

// IsSet
// check if bit[idx] of bitset is set
// returns true/false
func (bl *Bloom) IsSet(idx uint64) bool {
	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
	r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
	return r == 1
}

// JSONMarshal
// returns JSON-object (type bloomJSONImExport) as []byte
func (bl Bloom) JSONMarshal() []byte {
	bloomImEx := bloomJSONImExport{}
	bloomImEx.SetLocs = uint64(bl.setLocs)
	bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
	ptr := uintptr(unsafe.Pointer(&bl.bitset[0]))
	for i := range bloomImEx.FilterSet {
		bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(ptr))
		ptr++
	}
	data, err := json.Marshal(bloomImEx)
	if err != nil {
		log.Fatal("json.Marshal failed: ", err)
	}
	return data
}

// // alternative hashFn
// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
// 	h64 := fnv.New64a()
// 	h64.Write(*b)
// 	hash := h64.Sum64()
// 	h = hash >> 32
// 	l = hash << 32 >> 32
// 	return l, h
// }
//
// // <-- http://partow.net/programming/hashfunctions/index.html
// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
// // under the topic of sorting and search chapter 6.4.
// // modified to fit with boolset-length
// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
// 	hash := uint64(len(*b))
// 	for _, c := range *b {
// 		hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
// 	}
// 	h = hash >> bl.shift
// 	l = hash << bl.sizeExp >> bl.sizeExp
// 	return l, h
// }
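calcSizeByWrongPositives above is the standard Bloom-filter sizing formula m = -n * ln(p) / (ln 2)^2 bits and k = ln 2 * m / n hash locations (the literal 0.69314718056 is ln 2). A small sketch that reproduces that arithmetic for the README's 65536-entry, 1% false-positive example, using math.Ln2 in place of the literal:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	n := 65536.0 // expected number of entries
	p := 0.01    // target false-positive rate

	// Same computation as calcSizeByWrongPositives above.
	m := -1 * n * math.Log(p) / math.Pow(math.Ln2, 2) // bits in the bitset
	k := math.Ceil(math.Ln2 * m / n)                  // hash locations per entry

	// Prints roughly 628000 bits and 7 locations, matching the README's
	// "650000 for 65536 items and 7 locs" rule of thumb.
	fmt.Printf("bits=%d locs=%d\n", uint64(m), uint64(k))
}
```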
225 vendor/github.com/AndreasBriese/bbloom/sipHash.go generated vendored Normal file
@ -0,0 +1,225 @@
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.

package bbloom

// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit
// parts of 128-bit key: k0 and k1.
func (bl Bloom) sipHash(p []byte) (l, h uint64) {
	// Initialization.
	v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
	v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
	v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
	v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573
	t := uint64(len(p)) << 56

	// Compression.
	for len(p) >= 8 {

		m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
			uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56

		v3 ^= m

		// Round 1.
		v0 += v1
		v1 = v1<<13 | v1>>51
		v1 ^= v0
		v0 = v0<<32 | v0>>32

		v2 += v3
		v3 = v3<<16 | v3>>48
		v3 ^= v2

		v0 += v3
		v3 = v3<<21 | v3>>43
		v3 ^= v0

		v2 += v1
		v1 = v1<<17 | v1>>47
		v1 ^= v2
		v2 = v2<<32 | v2>>32

		// Round 2.
		v0 += v1
		v1 = v1<<13 | v1>>51
		v1 ^= v0
		v0 = v0<<32 | v0>>32

		v2 += v3
		v3 = v3<<16 | v3>>48
		v3 ^= v2

		v0 += v3
		v3 = v3<<21 | v3>>43
		v3 ^= v0

		v2 += v1
		v1 = v1<<17 | v1>>47
		v1 ^= v2
		v2 = v2<<32 | v2>>32

		v0 ^= m
		p = p[8:]
	}

	// Compress last block.
	switch len(p) {
	case 7:
		t |= uint64(p[6]) << 48
		fallthrough
	case 6:
		t |= uint64(p[5]) << 40
		fallthrough
	case 5:
		t |= uint64(p[4]) << 32
		fallthrough
	case 4:
		t |= uint64(p[3]) << 24
		fallthrough
	case 3:
		t |= uint64(p[2]) << 16
		fallthrough
	case 2:
		t |= uint64(p[1]) << 8
		fallthrough
	case 1:
		t |= uint64(p[0])
	}

	v3 ^= t

	// Round 1.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0

	v2 += v1
	v1 = v1<<17 | v1>>47
	v1 ^= v2
	v2 = v2<<32 | v2>>32

	// Round 2.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0

	v2 += v1
	v1 = v1<<17 | v1>>47
	v1 ^= v2
	v2 = v2<<32 | v2>>32

	v0 ^= t

	// Finalization.
	v2 ^= 0xff

	// Round 1.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0

	v2 += v1
	v1 = v1<<17 | v1>>47
	v1 ^= v2
	v2 = v2<<32 | v2>>32

	// Round 2.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0

	v2 += v1
	v1 = v1<<17 | v1>>47
	v1 ^= v2
	v2 = v2<<32 | v2>>32

	// Round 3.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0

	v2 += v1
	v1 = v1<<17 | v1>>47
	v1 ^= v2
	v2 = v2<<32 | v2>>32

	// Round 4.
	v0 += v1
	v1 = v1<<13 | v1>>51
	v1 ^= v0
	v0 = v0<<32 | v0>>32

	v2 += v3
	v3 = v3<<16 | v3>>48
	v3 ^= v2

	v0 += v3
	v3 = v3<<21 | v3>>43
	v3 ^= v0
||||||
|
|
||||||
|
v2 += v1
|
||||||
|
v1 = v1<<17 | v1>>47
|
||||||
|
v1 ^= v2
|
||||||
|
v2 = v2<<32 | v2>>32
|
||||||
|
|
||||||
|
// return v0 ^ v1 ^ v2 ^ v3
|
||||||
|
|
||||||
|
hash := v0 ^ v1 ^ v2 ^ v3
|
||||||
|
h = hash >> bl.shift
|
||||||
|
l = hash << bl.shift >> bl.shift
|
||||||
|
return l, h
|
||||||
|
|
||||||
|
}
|
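As a small aside, the closing lines above split the 64-bit SipHash value into an (l, h) pair. The standalone sketch below shows that split with an arbitrary shift of 48 standing in for bl.shift, which bbloom derives from the filter size.

```go
package main

import "fmt"

// splitHash mirrors the final step of sipHash above: the high bits select one
// bloom index, the low bits the other.
func splitHash(hash, shift uint64) (l, h uint64) {
	h = hash >> shift          // high bits
	l = hash << shift >> shift // low bits
	return l, h
}

func main() {
	l, h := splitHash(0xDEADBEEFCAFEF00D, 48)
	fmt.Printf("l=%#x h=%#x\n", l, h) // l=0xf00d h=0xdead
}
```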
140
vendor/github.com/AndreasBriese/bbloom/words.txt
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
|||||||
|
2014/01/01 00:00:00 /info.html
|
||||||
|
2014/01/01 00:00:00 /info.html
|
||||||
|
2014/01/01 00:00:01 /info.html
|
||||||
|
2014/01/01 00:00:02 /info.html
|
||||||
|
2014/01/01 00:00:03 /info.html
|
||||||
|
2014/01/01 00:00:04 /info.html
|
||||||
|
2014/01/01 00:00:05 /info.html
|
||||||
|
2014/01/01 00:00:06 /info.html
|
||||||
|
2014/01/01 00:00:07 /info.html
|
||||||
|
2014/01/01 00:00:08 /info.html
|
||||||
|
2014/01/01 00:00:09 /info.html
|
||||||
|
2014/01/01 00:00:10 /info.html
|
||||||
|
2014/01/01 00:00:11 /info.html
|
||||||
|
2014/01/01 00:00:12 /info.html
|
||||||
|
2014/01/01 00:00:13 /info.html
|
||||||
|
2014/01/01 00:00:14 /info.html
|
||||||
|
2014/01/01 00:00:15 /info.html
|
||||||
|
2014/01/01 00:00:16 /info.html
|
||||||
|
2014/01/01 00:00:17 /info.html
|
||||||
|
2014/01/01 00:00:18 /info.html
|
||||||
|
2014/01/01 00:00:19 /info.html
|
||||||
|
2014/01/01 00:00:20 /info.html
|
||||||
|
2014/01/01 00:00:21 /info.html
|
||||||
|
2014/01/01 00:00:22 /info.html
|
||||||
|
2014/01/01 00:00:23 /info.html
|
||||||
|
2014/01/01 00:00:24 /info.html
|
||||||
|
2014/01/01 00:00:25 /info.html
|
||||||
|
2014/01/01 00:00:26 /info.html
|
||||||
|
2014/01/01 00:00:27 /info.html
|
||||||
|
2014/01/01 00:00:28 /info.html
|
||||||
|
2014/01/01 00:00:29 /info.html
|
||||||
|
2014/01/01 00:00:30 /info.html
|
||||||
|
2014/01/01 00:00:31 /info.html
|
||||||
|
2014/01/01 00:00:32 /info.html
|
||||||
|
2014/01/01 00:00:33 /info.html
|
||||||
|
2014/01/01 00:00:34 /info.html
|
||||||
|
2014/01/01 00:00:35 /info.html
|
||||||
|
2014/01/01 00:00:36 /info.html
|
||||||
|
2014/01/01 00:00:37 /info.html
|
||||||
|
2014/01/01 00:00:38 /info.html
|
||||||
|
2014/01/01 00:00:39 /info.html
|
||||||
|
2014/01/01 00:00:40 /info.html
|
||||||
|
2014/01/01 00:00:41 /info.html
|
||||||
|
2014/01/01 00:00:42 /info.html
|
||||||
|
2014/01/01 00:00:43 /info.html
|
||||||
|
2014/01/01 00:00:44 /info.html
|
||||||
|
2014/01/01 00:00:45 /info.html
|
||||||
|
2014/01/01 00:00:46 /info.html
|
||||||
|
2014/01/01 00:00:47 /info.html
|
||||||
|
2014/01/01 00:00:48 /info.html
|
||||||
|
2014/01/01 00:00:49 /info.html
|
||||||
|
2014/01/01 00:00:50 /info.html
|
||||||
|
2014/01/01 00:00:51 /info.html
|
||||||
|
2014/01/01 00:00:52 /info.html
|
||||||
|
2014/01/01 00:00:53 /info.html
|
||||||
|
2014/01/01 00:00:54 /info.html
|
||||||
|
2014/01/01 00:00:55 /info.html
|
||||||
|
2014/01/01 00:00:56 /info.html
|
||||||
|
2014/01/01 00:00:57 /info.html
|
||||||
|
2014/01/01 00:00:58 /info.html
|
||||||
|
2014/01/01 00:00:59 /info.html
|
||||||
|
2014/01/01 00:01:00 /info.html
|
||||||
|
2014/01/01 00:01:01 /info.html
|
||||||
|
2014/01/01 00:01:02 /info.html
|
||||||
|
2014/01/01 00:01:03 /info.html
|
||||||
|
2014/01/01 00:01:04 /info.html
|
||||||
|
2014/01/01 00:01:05 /info.html
|
||||||
|
2014/01/01 00:01:06 /info.html
|
||||||
|
2014/01/01 00:01:07 /info.html
|
||||||
|
2014/01/01 00:01:08 /info.html
|
||||||
|
2014/01/01 00:01:09 /info.html
|
||||||
|
2014/01/01 00:01:10 /info.html
|
||||||
|
2014/01/01 00:01:11 /info.html
|
||||||
|
2014/01/01 00:01:12 /info.html
|
||||||
|
2014/01/01 00:01:13 /info.html
|
||||||
|
2014/01/01 00:01:14 /info.html
|
||||||
|
2014/01/01 00:01:15 /info.html
|
||||||
|
2014/01/01 00:01:16 /info.html
|
||||||
|
2014/01/01 00:01:17 /info.html
|
||||||
|
2014/01/01 00:01:18 /info.html
|
||||||
|
2014/01/01 00:01:19 /info.html
|
||||||
|
2014/01/01 00:01:20 /info.html
|
||||||
|
2014/01/01 00:01:21 /info.html
|
||||||
|
2014/01/01 00:01:22 /info.html
|
||||||
|
2014/01/01 00:01:23 /info.html
|
||||||
|
2014/01/01 00:01:24 /info.html
|
||||||
|
2014/01/01 00:01:25 /info.html
|
||||||
|
2014/01/01 00:01:26 /info.html
|
||||||
|
2014/01/01 00:01:27 /info.html
|
||||||
|
2014/01/01 00:01:28 /info.html
|
||||||
|
2014/01/01 00:01:29 /info.html
|
||||||
|
2014/01/01 00:01:30 /info.html
|
||||||
|
2014/01/01 00:01:31 /info.html
|
||||||
|
2014/01/01 00:01:32 /info.html
|
||||||
|
2014/01/01 00:01:33 /info.html
|
||||||
|
2014/01/01 00:01:34 /info.html
|
||||||
|
2014/01/01 00:01:35 /info.html
|
||||||
|
2014/01/01 00:01:36 /info.html
|
||||||
|
2014/01/01 00:01:37 /info.html
|
||||||
|
2014/01/01 00:01:38 /info.html
|
||||||
|
2014/01/01 00:01:39 /info.html
|
||||||
|
2014/01/01 00:01:40 /info.html
|
||||||
|
2014/01/01 00:01:41 /info.html
|
||||||
|
2014/01/01 00:01:42 /info.html
|
||||||
|
2014/01/01 00:01:43 /info.html
|
||||||
|
2014/01/01 00:01:44 /info.html
|
||||||
|
2014/01/01 00:01:45 /info.html
|
||||||
|
2014/01/01 00:01:46 /info.html
|
||||||
|
2014/01/01 00:01:47 /info.html
|
||||||
|
2014/01/01 00:01:48 /info.html
|
||||||
|
2014/01/01 00:01:49 /info.html
|
||||||
|
2014/01/01 00:01:50 /info.html
|
||||||
|
2014/01/01 00:01:51 /info.html
|
||||||
|
2014/01/01 00:01:52 /info.html
|
||||||
|
2014/01/01 00:01:53 /info.html
|
||||||
|
2014/01/01 00:01:54 /info.html
|
||||||
|
2014/01/01 00:01:55 /info.html
|
||||||
|
2014/01/01 00:01:56 /info.html
|
||||||
|
2014/01/01 00:01:57 /info.html
|
||||||
|
2014/01/01 00:01:58 /info.html
|
||||||
|
2014/01/01 00:01:59 /info.html
|
||||||
|
2014/01/01 00:02:00 /info.html
|
||||||
|
2014/01/01 00:02:01 /info.html
|
||||||
|
2014/01/01 00:02:02 /info.html
|
||||||
|
2014/01/01 00:02:03 /info.html
|
||||||
|
2014/01/01 00:02:04 /info.html
|
||||||
|
2014/01/01 00:02:05 /info.html
|
||||||
|
2014/01/01 00:02:06 /info.html
|
||||||
|
2014/01/01 00:02:07 /info.html
|
||||||
|
2014/01/01 00:02:08 /info.html
|
||||||
|
2014/01/01 00:02:09 /info.html
|
||||||
|
2014/01/01 00:02:10 /info.html
|
||||||
|
2014/01/01 00:02:11 /info.html
|
||||||
|
2014/01/01 00:02:12 /info.html
|
||||||
|
2014/01/01 00:02:13 /info.html
|
||||||
|
2014/01/01 00:02:14 /info.html
|
||||||
|
2014/01/01 00:02:15 /info.html
|
||||||
|
2014/01/01 00:02:16 /info.html
|
||||||
|
2014/01/01 00:02:17 /info.html
|
||||||
|
2014/01/01 00:02:18 /info.html
|
21
vendor/github.com/Stebalien/go-bitfield/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
The MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2018 Steven Allen
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
5
vendor/github.com/Stebalien/go-bitfield/README.md
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# go-bitfield
|
||||||
|
|
||||||
|
This is a simple bitfield package that's about 2-3x faster than using `big.Int`s
|
||||||
|
from the standard library. It also has a better interface (and, e.g., supports
|
||||||
|
counting ones).
|
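A short usage sketch based on the API defined in bitfield.go below (NewBitfield, SetBit, Bit, Ones); the 64-bit size is an arbitrary choice for the example.

```go
package main

import (
	"fmt"

	"github.com/Stebalien/go-bitfield"
)

func main() {
	bf := bitfield.NewBitfield(64) // size must be a multiple of 8
	bf.SetBit(3)
	bf.SetBit(42)

	fmt.Println(bf.Bit(3), bf.Bit(4)) // true false
	fmt.Println(bf.Ones())            // 2
}
```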
114
vendor/github.com/Stebalien/go-bitfield/bitfield.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
package bitfield
|
||||||
|
|
||||||
|
// NOTE: Don't bother replacing the divisions/modulo with shifts/ands, go is smart.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/bits"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewBitfield creates a new fixed-sized Bitfield (allocated up-front).
|
||||||
|
//
|
||||||
|
// Panics if size is not a multiple of 8.
|
||||||
|
func NewBitfield(size int) Bitfield {
|
||||||
|
if size%8 != 0 {
|
||||||
|
panic("Bitfield size must be a multiple of 8")
|
||||||
|
}
|
||||||
|
return make([]byte, size/8)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromBytes constructs a new bitfield from a serialized bitfield.
|
||||||
|
func FromBytes(size int, bits []byte) Bitfield {
|
||||||
|
bf := NewBitfield(size)
|
||||||
|
start := len(bf) - len(bits)
|
||||||
|
if start < 0 {
|
||||||
|
panic("bitfield too small")
|
||||||
|
}
|
||||||
|
copy(bf[start:], bits)
|
||||||
|
return bf
|
||||||
|
}
|
||||||
|
|
||||||
|
func (bf Bitfield) offset(i int) (uint, uint8) {
|
||||||
|
return uint(len(bf)) - (uint(i) / 8) - 1, uint8(i) % 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bitfield is, well, a bitfield.
|
||||||
|
type Bitfield []byte
|
||||||
|
|
||||||
|
// Bytes returns the Bitfield as a byte string.
|
||||||
|
//
|
||||||
|
// This function *does not* copy.
|
||||||
|
func (bf Bitfield) Bytes() []byte {
|
||||||
|
for i, b := range bf {
|
||||||
|
if b != 0 {
|
||||||
|
return bf[i:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bit returns the ith bit.
|
||||||
|
//
|
||||||
|
// Panics if the bit is out of bounds.
|
||||||
|
func (bf Bitfield) Bit(i int) bool {
|
||||||
|
idx, off := bf.offset(i)
|
||||||
|
return (bf[idx]>>off)&0x1 != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBit sets the ith bit.
|
||||||
|
//
|
||||||
|
// Panics if the bit is out of bounds.
|
||||||
|
func (bf Bitfield) SetBit(i int) {
|
||||||
|
idx, off := bf.offset(i)
|
||||||
|
bf[idx] |= 1 << off
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnsetBit unsets the ith bit.
|
||||||
|
//
|
||||||
|
// Panics if the bit is out of bounds.
|
||||||
|
func (bf Bitfield) UnsetBit(i int) {
|
||||||
|
idx, off := bf.offset(i)
|
||||||
|
bf[idx] &= 0xFF ^ (1 << off)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBytes sets the bits to the given byte array.
|
||||||
|
//
|
||||||
|
// Panics if 'b' is larger than the bitfield.
|
||||||
|
func (bf Bitfield) SetBytes(b []byte) {
|
||||||
|
start := len(bf) - len(b)
|
||||||
|
if start < 0 {
|
||||||
|
panic("bitfield too small")
|
||||||
|
}
|
||||||
|
for i := range bf[:start] {
|
||||||
|
bf[i] = 0
|
||||||
|
}
|
||||||
|
copy(bf[start:], b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ones returns the number of bits set.
|
||||||
|
func (bf Bitfield) Ones() int {
|
||||||
|
cnt := 0
|
||||||
|
for _, b := range bf {
|
||||||
|
cnt += bits.OnesCount8(b)
|
||||||
|
}
|
||||||
|
return cnt
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnesBefore returns the number of bits set *before* this bit.
|
||||||
|
func (bf Bitfield) OnesBefore(i int) int {
|
||||||
|
idx, off := bf.offset(i)
|
||||||
|
cnt := bits.OnesCount8(bf[idx] << (8 - off))
|
||||||
|
for _, b := range bf[idx+1:] {
|
||||||
|
cnt += bits.OnesCount8(b)
|
||||||
|
}
|
||||||
|
return cnt
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnesAfter returns the number of bits set *after* this bit.
|
||||||
|
func (bf Bitfield) OnesAfter(i int) int {
|
||||||
|
idx, off := bf.offset(i)
|
||||||
|
cnt := bits.OnesCount8(bf[idx] >> off)
|
||||||
|
for _, b := range bf[:idx] {
|
||||||
|
cnt += bits.OnesCount8(b)
|
||||||
|
}
|
||||||
|
return cnt
|
||||||
|
}
|
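OnesBefore and OnesAfter above behave like rank queries over the bitfield; a small sketch (values chosen arbitrarily) of what they return:

```go
package main

import (
	"fmt"

	"github.com/Stebalien/go-bitfield"
)

func main() {
	bf := bitfield.NewBitfield(16)
	bf.SetBit(1)
	bf.SetBit(5)
	bf.SetBit(9)

	fmt.Println(bf.OnesBefore(5)) // 1: only bit 1 is set before index 5
	fmt.Println(bf.OnesAfter(5))  // 2: bits 5 and 9 (the bit itself is included)
}
```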
3
vendor/github.com/Stebalien/go-bitfield/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
module github.com/Stebalien/go-bitfield
|
||||||
|
|
||||||
|
go 1.12
|
16
vendor/github.com/Stebalien/go-bitfield/package.json
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
{
|
||||||
|
"author": "Stebalien",
|
||||||
|
"bugs": {
|
||||||
|
"url": "https://github.com/Stebalien/go-bitfield"
|
||||||
|
},
|
||||||
|
"gx": {
|
||||||
|
"dvcsimport": "github.com/Stebalien/go-bitfield"
|
||||||
|
},
|
||||||
|
"gxVersion": "0.12.1",
|
||||||
|
"language": "go",
|
||||||
|
"license": "MIT",
|
||||||
|
"name": "go-bitfield",
|
||||||
|
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
|
||||||
|
"version": "0.1.3"
|
||||||
|
}
|
||||||
|
|
10
vendor/github.com/allegro/bigcache/.gitignore
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
.idea
|
||||||
|
.DS_Store
|
||||||
|
/server/server.exe
|
||||||
|
/server/server
|
||||||
|
/server/server_dar*
|
||||||
|
/server/server_fre*
|
||||||
|
/server/server_win*
|
||||||
|
/server/server_net*
|
||||||
|
/server/server_ope*
|
||||||
|
CHANGELOG.md
|
31
vendor/github.com/allegro/bigcache/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.x
|
||||||
|
- tip
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
fast_finish: true
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- go get github.com/modocache/gover
|
||||||
|
- go get github.com/mattn/goveralls
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
- go get golang.org/x/tools/cmd/goimports
|
||||||
|
- go get golang.org/x/lint/golint
|
||||||
|
- go get github.com/stretchr/testify/assert
|
||||||
|
- go get github.com/gordonklaus/ineffassign
|
||||||
|
|
||||||
|
script:
|
||||||
|
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
|
||||||
|
- diff <(echo -n) <(gofmt -s -d .)
|
||||||
|
- golint ./... # This won't break the build, just show warnings
|
||||||
|
- ineffassign .
|
||||||
|
- go vet ./...
|
||||||
|
- go test -race -count=1 -coverprofile=queue.coverprofile ./queue
|
||||||
|
- go test -race -count=1 -coverprofile=server.coverprofile ./server
|
||||||
|
- go test -race -count=1 -coverprofile=main.coverprofile
|
||||||
|
- $HOME/gopath/bin/gover
|
||||||
|
- $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci
|
150
vendor/github.com/allegro/bigcache/README.md
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
|||||||
|
# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache) [![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master) [![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache) [![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache)
|
||||||
|
|
||||||
|
Fast, concurrent, evicting in-memory cache written to keep a big number of entries without impacting performance.
BigCache keeps entries on the heap but omits GC for them. To achieve that, operations take place on byte arrays,
therefore entry (de)serialization in front of the cache will be needed in most use cases.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Simple initialization
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/allegro/bigcache"
|
||||||
|
|
||||||
|
cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
|
||||||
|
|
||||||
|
cache.Set("my-unique-key", []byte("value"))
|
||||||
|
|
||||||
|
entry, _ := cache.Get("my-unique-key")
|
||||||
|
fmt.Println(string(entry))
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom initialization
|
||||||
|
|
||||||
|
When the cache load can be predicted in advance, it is better to use custom initialization, because additional
memory allocation can be avoided that way.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/allegro/bigcache"
|
||||||
|
)
|
||||||
|
|
||||||
|
config := bigcache.Config {
|
||||||
|
// number of shards (must be a power of 2)
|
||||||
|
Shards: 1024,
|
||||||
|
// time after which entry can be evicted
|
||||||
|
LifeWindow: 10 * time.Minute,
|
||||||
|
// rps * lifeWindow, used only in initial memory allocation
|
||||||
|
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||||
|
// max entry size in bytes, used only in initial memory allocation
|
||||||
|
MaxEntrySize: 500,
|
||||||
|
// prints information about additional memory allocation
|
||||||
|
Verbose: true,
|
||||||
|
// cache will not allocate more memory than this limit, value in MB
|
||||||
|
// if value is reached then the oldest entries can be overridden for the new ones
|
||||||
|
// 0 value means no size limit
|
||||||
|
HardMaxCacheSize: 8192,
|
||||||
|
// callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||||
|
// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
|
||||||
|
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||||
|
OnRemove: nil,
|
||||||
|
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||||
|
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
|
||||||
|
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||||
|
// Ignored if OnRemove is specified.
|
||||||
|
OnRemoveWithReason: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
cache, initErr := bigcache.NewBigCache(config)
|
||||||
|
if initErr != nil {
|
||||||
|
log.Fatal(initErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
cache.Set("my-unique-key", []byte("value"))
|
||||||
|
|
||||||
|
if entry, err := cache.Get("my-unique-key"); err == nil {
|
||||||
|
fmt.Println(string(entry))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
|
||||||
|
Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
|
||||||
|
Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.
|
||||||
|
|
||||||
|
### Writes and reads
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
|
||||||
|
|
||||||
|
BenchmarkMapSet-8 3000000 569 ns/op 202 B/op 3 allocs/op
|
||||||
|
BenchmarkConcurrentMapSet-8 1000000 1592 ns/op 347 B/op 8 allocs/op
|
||||||
|
BenchmarkFreeCacheSet-8 3000000 775 ns/op 355 B/op 2 allocs/op
|
||||||
|
BenchmarkBigCacheSet-8 3000000 640 ns/op 303 B/op 2 allocs/op
|
||||||
|
BenchmarkMapGet-8 5000000 407 ns/op 24 B/op 1 allocs/op
|
||||||
|
BenchmarkConcurrentMapGet-8 3000000 558 ns/op 24 B/op 2 allocs/op
|
||||||
|
BenchmarkFreeCacheGet-8 2000000 682 ns/op 136 B/op 2 allocs/op
|
||||||
|
BenchmarkBigCacheGet-8 3000000 512 ns/op 152 B/op 4 allocs/op
|
||||||
|
BenchmarkBigCacheSetParallel-8 10000000 225 ns/op 313 B/op 3 allocs/op
|
||||||
|
BenchmarkFreeCacheSetParallel-8 10000000 218 ns/op 341 B/op 3 allocs/op
|
||||||
|
BenchmarkConcurrentMapSetParallel-8 5000000 318 ns/op 200 B/op 6 allocs/op
|
||||||
|
BenchmarkBigCacheGetParallel-8 20000000 178 ns/op 152 B/op 4 allocs/op
|
||||||
|
BenchmarkFreeCacheGetParallel-8 20000000 295 ns/op 136 B/op 3 allocs/op
|
||||||
|
BenchmarkConcurrentMapGetParallel-8 10000000 237 ns/op 24 B/op 2 allocs/op
|
||||||
|
```
|
||||||
|
|
||||||
|
Writes and reads in bigcache are faster than in freecache.
|
||||||
|
Writes to map are the slowest.
|
||||||
|
|
||||||
|
### GC pause time
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd caches_bench; go run caches_gc_overhead_comparison.go
|
||||||
|
|
||||||
|
Number of entries: 20000000
|
||||||
|
GC pause for bigcache: 5.8658ms
|
||||||
|
GC pause for freecache: 32.4341ms
|
||||||
|
GC pause for map: 52.9661ms
|
||||||
|
```
|
||||||
|
|
||||||
|
The test shows how long the GC pauses are for caches filled with 20 million entries.
Bigcache and freecache have very similar GC pause times.
Both clearly reduce GC overhead compared to a plain map,
whose GC pause time took more than 10 seconds.
|
||||||
|
|
||||||
|
## How it works
|
||||||
|
|
||||||
|
BigCache relies on an optimization introduced in Go 1.5 ([issue-9477](https://github.com/golang/go/issues/9477)).
This optimization means that if a map has no pointers in its keys or values, the GC will omit its content.
Therefore BigCache uses `map[uint64]uint32`, where keys are hashed and values are offsets of entries.

Entries are kept in a byte array, again to avoid GC.
The byte array can grow to gigabytes without impacting performance,
because the GC will only see a single pointer to it.
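To make the optimization concrete, here is a minimal, hypothetical sketch (not part of bigcache): an index map with pointer-free keys and values next to a single backing byte slice, so the collector has only one pointer to scan regardless of the number of entries.

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	// Hashed key -> offset into one backing byte slice, mirroring bigcache's layout.
	index := make(map[uint64]uint32, 1000000)
	data := make([]byte, 0, 1000000)

	for i := uint64(0); i < 1000000; i++ {
		index[i] = uint32(len(data))
		data = append(data, byte(i))
	}

	start := time.Now()
	runtime.GC()
	fmt.Printf("entries=%d, GC took %v\n", len(index), time.Since(start))
}
```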
|
||||||
|
|
||||||
|
## Bigcache vs Freecache
|
||||||
|
|
||||||
|
Both caches provide the same core features but they reduce GC overhead in different ways.
|
||||||
|
Bigcache relies on `map[uint64]uint32`, freecache implements its own mapping built on
|
||||||
|
slices to reduce number of pointers.
|
||||||
|
|
||||||
|
Results from benchmark tests are presented above.
|
||||||
|
One of the advantages of bigcache over freecache is that you don’t need to know
the size of the cache in advance, because when bigcache is full,
it can allocate additional memory for new entries instead of
overwriting existing ones as freecache currently does.
However, a hard maximum size can also be set in bigcache; see [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
|
||||||
|
|
||||||
|
## HTTP Server
|
||||||
|
|
||||||
|
This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
|
||||||
|
|
||||||
|
## More
|
||||||
|
|
||||||
|
Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))
|
202
vendor/github.com/allegro/bigcache/bigcache.go
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
minimumEntriesInShard = 10 // Minimum number of entries in single shard
|
||||||
|
)
|
||||||
|
|
||||||
|
// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
|
||||||
|
// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays,
|
||||||
|
// therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||||
|
type BigCache struct {
|
||||||
|
shards []*cacheShard
|
||||||
|
lifeWindow uint64
|
||||||
|
clock clock
|
||||||
|
hash Hasher
|
||||||
|
config Config
|
||||||
|
shardMask uint64
|
||||||
|
maxShardSize uint32
|
||||||
|
close chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
|
||||||
|
type RemoveReason uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Expired means the key is past its LifeWindow.
|
||||||
|
Expired RemoveReason = iota
|
||||||
|
// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
|
||||||
|
// entry exceeded the maximum shard size.
|
||||||
|
NoSpace
|
||||||
|
// Deleted means Delete was called and this key was removed as a result.
|
||||||
|
Deleted
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewBigCache initializes a new instance of BigCache.
|
||||||
|
func NewBigCache(config Config) (*BigCache, error) {
|
||||||
|
return newBigCache(config, &systemClock{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||||
|
|
||||||
|
if !isPowerOfTwo(config.Shards) {
|
||||||
|
return nil, fmt.Errorf("Shards number must be power of two")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Hasher == nil {
|
||||||
|
config.Hasher = newDefaultHasher()
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := &BigCache{
|
||||||
|
shards: make([]*cacheShard, config.Shards),
|
||||||
|
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||||
|
clock: clock,
|
||||||
|
hash: config.Hasher,
|
||||||
|
config: config,
|
||||||
|
shardMask: uint64(config.Shards - 1),
|
||||||
|
maxShardSize: uint32(config.maximumShardSize()),
|
||||||
|
close: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
var onRemove func(wrappedEntry []byte, reason RemoveReason)
|
||||||
|
if config.OnRemove != nil {
|
||||||
|
onRemove = cache.providedOnRemove
|
||||||
|
} else if config.OnRemoveWithReason != nil {
|
||||||
|
onRemove = cache.providedOnRemoveWithReason
|
||||||
|
} else {
|
||||||
|
onRemove = cache.notProvidedOnRemove
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < config.Shards; i++ {
|
||||||
|
cache.shards[i] = initNewShard(config, onRemove, clock)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.CleanWindow > 0 {
|
||||||
|
go func() {
|
||||||
|
ticker := time.NewTicker(config.CleanWindow)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case t := <-ticker.C:
|
||||||
|
cache.cleanUp(uint64(t.Unix()))
|
||||||
|
case <-cache.close:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
return cache, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is used to signal a shutdown of the cache when you are done with it.
|
||||||
|
// This allows the cleaning goroutines to exit and ensures references are not
|
||||||
|
// kept to the cache preventing GC of the entire cache.
|
||||||
|
func (c *BigCache) Close() error {
|
||||||
|
close(c.close)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get reads entry for the key.
|
||||||
|
// It returns an ErrEntryNotFound when
|
||||||
|
// no entry exists for the given key.
|
||||||
|
func (c *BigCache) Get(key string) ([]byte, error) {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.get(key, hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set saves entry under the key
|
||||||
|
func (c *BigCache) Set(key string, entry []byte) error {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.set(key, hashedKey, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes the key
|
||||||
|
func (c *BigCache) Delete(key string) error {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.del(key, hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset empties all cache shards
|
||||||
|
func (c *BigCache) Reset() error {
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
shard.reset(c.config)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len computes number of entries in cache
|
||||||
|
func (c *BigCache) Len() int {
|
||||||
|
var len int
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
len += shard.len()
|
||||||
|
}
|
||||||
|
return len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity returns the number of bytes stored in the cache.
|
||||||
|
func (c *BigCache) Capacity() int {
|
||||||
|
var len int
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
len += shard.capacity()
|
||||||
|
}
|
||||||
|
return len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns cache's statistics
|
||||||
|
func (c *BigCache) Stats() Stats {
|
||||||
|
var s Stats
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
tmp := shard.getStats()
|
||||||
|
s.Hits += tmp.Hits
|
||||||
|
s.Misses += tmp.Misses
|
||||||
|
s.DelHits += tmp.DelHits
|
||||||
|
s.DelMisses += tmp.DelMisses
|
||||||
|
s.Collisions += tmp.Collisions
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
|
||||||
|
func (c *BigCache) Iterator() *EntryInfoIterator {
|
||||||
|
return newIterator(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||||
|
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||||
|
if currentTimestamp-oldestTimestamp > c.lifeWindow {
|
||||||
|
evict(Expired)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) cleanUp(currentTimestamp uint64) {
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
shard.cleanUp(currentTimestamp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
|
||||||
|
return c.shards[hashedKey&c.shardMask]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
|
||||||
|
c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
}
|
14
vendor/github.com/allegro/bigcache/bytes.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func bytesToString(b []byte) string {
|
||||||
|
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||||
|
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
|
||||||
|
return *(*string)(unsafe.Pointer(&strHeader))
|
||||||
|
}
|
7
vendor/github.com/allegro/bigcache/bytes_appengine.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package bigcache
|
||||||
|
|
||||||
|
func bytesToString(b []byte) string {
|
||||||
|
return string(b)
|
||||||
|
}
|
86
vendor/github.com/allegro/bigcache/config.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// Config for BigCache
|
||||||
|
type Config struct {
|
||||||
|
// Number of cache shards, value must be a power of two
|
||||||
|
Shards int
|
||||||
|
// Time after which entry can be evicted
|
||||||
|
LifeWindow time.Duration
|
||||||
|
// Interval between removing expired entries (clean up).
|
||||||
|
// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
|
||||||
|
CleanWindow time.Duration
|
||||||
|
// Max number of entries in life window. Used only to calculate initial size for cache shards.
|
||||||
|
// When proper value is set then additional memory allocation does not occur.
|
||||||
|
MaxEntriesInWindow int
|
||||||
|
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
|
||||||
|
MaxEntrySize int
|
||||||
|
// Verbose mode prints information about new memory allocation
|
||||||
|
Verbose bool
|
||||||
|
// Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
|
||||||
|
Hasher Hasher
|
||||||
|
// HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
|
||||||
|
// It can protect application from consuming all available memory on machine, therefore from running OOM Killer.
|
||||||
|
// Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
|
||||||
|
// the oldest entries are overridden for the new ones.
|
||||||
|
HardMaxCacheSize int
|
||||||
|
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||||
|
// for the new entry, or because delete was called.
|
||||||
|
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||||
|
OnRemove func(key string, entry []byte)
|
||||||
|
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||||
|
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
|
||||||
|
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||||
|
// Ignored if OnRemove is specified.
|
||||||
|
OnRemoveWithReason func(key string, entry []byte, reason RemoveReason)
|
||||||
|
|
||||||
|
onRemoveFilter int
|
||||||
|
|
||||||
|
// Logger is a logging interface and used in combination with `Verbose`
|
||||||
|
// Defaults to `DefaultLogger()`
|
||||||
|
Logger Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultConfig initializes config with default values.
|
||||||
|
// When load for BigCache can be predicted in advance then it is better to use custom config.
|
||||||
|
func DefaultConfig(eviction time.Duration) Config {
|
||||||
|
return Config{
|
||||||
|
Shards: 1024,
|
||||||
|
LifeWindow: eviction,
|
||||||
|
CleanWindow: 0,
|
||||||
|
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||||
|
MaxEntrySize: 500,
|
||||||
|
Verbose: true,
|
||||||
|
Hasher: newDefaultHasher(),
|
||||||
|
HardMaxCacheSize: 0,
|
||||||
|
Logger: DefaultLogger(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// initialShardSize computes initial shard size
|
||||||
|
func (c Config) initialShardSize() int {
|
||||||
|
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
|
||||||
|
}
|
||||||
|
|
||||||
|
// maximumShardSize computes maximum shard size
|
||||||
|
func (c Config) maximumShardSize() int {
|
||||||
|
maxShardSize := 0
|
||||||
|
|
||||||
|
if c.HardMaxCacheSize > 0 {
|
||||||
|
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
|
||||||
|
}
|
||||||
|
|
||||||
|
return maxShardSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason.
|
||||||
|
// Filtering out reasons prevents bigcache from unwrapping them, which saves cpu.
|
||||||
|
func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config {
|
||||||
|
c.onRemoveFilter = 0
|
||||||
|
for i := range reasons {
|
||||||
|
c.onRemoveFilter |= 1 << uint(reasons[i])
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
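A short sketch of wiring OnRemoveWithReason together with OnRemoveFilterSet from the config above, so the callback only fires for expirations; the one-minute and one-second windows are arbitrary example values.

```go
package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	config := bigcache.DefaultConfig(time.Minute)
	config.CleanWindow = time.Second // run the background clean-up loop
	config.OnRemoveWithReason = func(key string, entry []byte, reason bigcache.RemoveReason) {
		fmt.Printf("evicted %q (reason %d)\n", key, reason)
	}
	// Only report Expired removals; Deleted and NoSpace are filtered out.
	config = config.OnRemoveFilterSet(bigcache.Expired)

	cache, _ := bigcache.NewBigCache(config)
	cache.Set("k", []byte("v"))
}
```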
62
vendor/github.com/allegro/bigcache/encoding.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
timestampSizeInBytes = 8 // Number of bytes used for timestamp
|
||||||
|
hashSizeInBytes = 8 // Number of bytes used for hash
|
||||||
|
keySizeInBytes = 2 // Number of bytes used for size of entry key
|
||||||
|
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
|
||||||
|
)
|
||||||
|
|
||||||
|
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
|
||||||
|
keyLength := len(key)
|
||||||
|
blobLength := len(entry) + headersSizeInBytes + keyLength
|
||||||
|
|
||||||
|
if blobLength > len(*buffer) {
|
||||||
|
*buffer = make([]byte, blobLength)
|
||||||
|
}
|
||||||
|
blob := *buffer
|
||||||
|
|
||||||
|
binary.LittleEndian.PutUint64(blob, timestamp)
|
||||||
|
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
|
||||||
|
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
|
||||||
|
copy(blob[headersSizeInBytes:], key)
|
||||||
|
copy(blob[headersSizeInBytes+keyLength:], entry)
|
||||||
|
|
||||||
|
return blob[:blobLength]
|
||||||
|
}
|
||||||
|
|
||||||
|
func readEntry(data []byte) []byte {
|
||||||
|
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||||
|
|
||||||
|
// copy on read
|
||||||
|
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
|
||||||
|
copy(dst, data[headersSizeInBytes+length:])
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func readTimestampFromEntry(data []byte) uint64 {
|
||||||
|
return binary.LittleEndian.Uint64(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readKeyFromEntry(data []byte) string {
|
||||||
|
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||||
|
|
||||||
|
// copy on read
|
||||||
|
dst := make([]byte, length)
|
||||||
|
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
|
||||||
|
|
||||||
|
return bytesToString(dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readHashFromEntry(data []byte) uint64 {
|
||||||
|
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func resetKeyFromEntry(data []byte) {
|
||||||
|
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
|
||||||
|
}
|
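For reference, a standalone sketch of the wire layout used by wrapEntry and the read helpers above: an 8-byte timestamp, an 8-byte hash, a 2-byte key length, then the key and the entry; the key, entry, timestamp and hash values are made up for the example.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	key, entry := "user:42", []byte("payload")
	blob := make([]byte, 8+8+2+len(key)+len(entry))

	binary.LittleEndian.PutUint64(blob[0:], 1546300800) // timestamp
	binary.LittleEndian.PutUint64(blob[8:], 0xABCDEF)   // hashed key
	binary.LittleEndian.PutUint16(blob[16:], uint16(len(key)))
	copy(blob[18:], key)
	copy(blob[18+len(key):], entry)

	// Reading it back mirrors readKeyFromEntry / readEntry.
	klen := binary.LittleEndian.Uint16(blob[16:])
	fmt.Println(string(blob[18 : 18+klen])) // user:42
	fmt.Println(string(blob[18+klen:]))     // payload
}
```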
6
vendor/github.com/allegro/bigcache/entry_not_found_error.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// ErrEntryNotFound is the error returned when no entry exists for the provided key.
|
||||||
|
var ErrEntryNotFound = errors.New("Entry not found")
|
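A short sketch of distinguishing a cache miss from other errors using the ErrEntryNotFound sentinel declared above, together with the NewBigCache, DefaultConfig and Get calls from this package.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	if err != nil {
		log.Fatal(err)
	}

	if _, err := cache.Get("missing-key"); err == bigcache.ErrEntryNotFound {
		fmt.Println("cache miss")
	} else if err != nil {
		log.Fatal(err)
	}
}
```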
239
vendor/github.com/allegro/bigcache/shard.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/allegro/bigcache/queue"
|
||||||
|
)
|
||||||
|
|
||||||
|
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
|
||||||
|
|
||||||
|
type cacheShard struct {
|
||||||
|
hashmap map[uint64]uint32
|
||||||
|
entries queue.BytesQueue
|
||||||
|
lock sync.RWMutex
|
||||||
|
entryBuffer []byte
|
||||||
|
onRemove onRemoveCallback
|
||||||
|
|
||||||
|
isVerbose bool
|
||||||
|
logger Logger
|
||||||
|
clock clock
|
||||||
|
lifeWindow uint64
|
||||||
|
|
||||||
|
stats Stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
itemIndex := s.hashmap[hashedKey]
|
||||||
|
|
||||||
|
if itemIndex == 0 {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.miss()
|
||||||
|
return nil, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||||
|
if err != nil {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.miss()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
|
||||||
|
if s.isVerbose {
|
||||||
|
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
|
||||||
|
}
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.collision()
|
||||||
|
return nil, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.hit()
|
||||||
|
return readEntry(wrappedEntry), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||||
|
currentTimestamp := uint64(s.clock.epoch())
|
||||||
|
|
||||||
|
s.lock.Lock()
|
||||||
|
|
||||||
|
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
|
||||||
|
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
|
||||||
|
resetKeyFromEntry(previousEntry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||||
|
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
|
||||||
|
|
||||||
|
for {
|
||||||
|
if index, err := s.entries.Push(w); err == nil {
|
||||||
|
s.hashmap[hashedKey] = uint32(index)
|
||||||
|
s.lock.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.removeOldestEntry(NoSpace) != nil {
|
||||||
|
s.lock.Unlock()
|
||||||
|
return fmt.Errorf("entry is bigger than max shard size")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||||
|
s.lock.RLock()
|
||||||
|
itemIndex := s.hashmap[hashedKey]
|
||||||
|
|
||||||
|
if itemIndex == 0 {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.delmiss()
|
||||||
|
return ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||||
|
if err != nil {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.delmiss()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s.lock.RUnlock()
|
||||||
|
|
||||||
|
s.lock.Lock()
|
||||||
|
{
|
||||||
|
delete(s.hashmap, hashedKey)
|
||||||
|
s.onRemove(wrappedEntry, Deleted)
|
||||||
|
resetKeyFromEntry(wrappedEntry)
|
||||||
|
}
|
||||||
|
s.lock.Unlock()
|
||||||
|
|
||||||
|
s.delhit()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||||
|
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||||
|
if currentTimestamp-oldestTimestamp > s.lifeWindow {
|
||||||
|
evict(Expired)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
|
||||||
|
s.lock.Lock()
|
||||||
|
for {
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err != nil {
|
||||||
|
break
|
||||||
|
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getOldestEntry() ([]byte, error) {
|
||||||
|
return s.entries.Peek()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getEntry(index int) ([]byte, error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
entry, err := s.entries.Get(index)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
|
||||||
|
return entry, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
|
||||||
|
s.lock.RLock()
|
||||||
|
keys = make([]uint32, len(s.hashmap))
|
||||||
|
|
||||||
|
for _, index := range s.hashmap {
|
||||||
|
keys[next] = index
|
||||||
|
next++
|
||||||
|
}
|
||||||
|
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return keys, next
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
|
||||||
|
oldest, err := s.entries.Pop()
|
||||||
|
if err == nil {
|
||||||
|
hash := readHashFromEntry(oldest)
|
||||||
|
delete(s.hashmap, hash)
|
||||||
|
s.onRemove(oldest, reason)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) reset(config Config) {
|
||||||
|
s.lock.Lock()
|
||||||
|
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
|
||||||
|
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
|
||||||
|
s.entries.Reset()
|
||||||
|
s.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) len() int {
|
||||||
|
s.lock.RLock()
|
||||||
|
res := len(s.hashmap)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) capacity() int {
|
||||||
|
s.lock.RLock()
|
||||||
|
res := s.entries.Capacity()
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getStats() Stats {
|
||||||
|
var stats = Stats{
|
||||||
|
Hits: atomic.LoadInt64(&s.stats.Hits),
|
||||||
|
Misses: atomic.LoadInt64(&s.stats.Misses),
|
||||||
|
DelHits: atomic.LoadInt64(&s.stats.DelHits),
|
||||||
|
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
|
||||||
|
Collisions: atomic.LoadInt64(&s.stats.Collisions),
|
||||||
|
}
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) hit() {
|
||||||
|
atomic.AddInt64(&s.stats.Hits, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) miss() {
|
||||||
|
atomic.AddInt64(&s.stats.Misses, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) delhit() {
|
||||||
|
atomic.AddInt64(&s.stats.DelHits, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) delmiss() {
|
||||||
|
atomic.AddInt64(&s.stats.DelMisses, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) collision() {
|
||||||
|
atomic.AddInt64(&s.stats.Collisions, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
|
||||||
|
return &cacheShard{
|
||||||
|
hashmap: make(map[uint64]uint32, config.initialShardSize()),
|
||||||
|
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
|
||||||
|
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
|
||||||
|
onRemove: callback,
|
||||||
|
|
||||||
|
isVerbose: config.Verbose,
|
||||||
|
logger: newLogger(config.Logger),
|
||||||
|
clock: clock,
|
||||||
|
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||||
|
}
|
||||||
|
}
|
19
vendor/github.com/bren2010/proquint/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
Copyright (c) 2014 Brendan McMillion
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
6
vendor/github.com/bren2010/proquint/README.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
Proquint
|
||||||
|
-------
|
||||||
|
|
||||||
|
Golang implementation of [Proquint Pronounceable Identifiers](https://github.com/deoxxa/proquint).
|
||||||
|
|
||||||
|
|
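A brief usage sketch of the Encode, IsProquint and Decode functions defined in proquint.go below; the 4-byte input (an IPv4 address) is an arbitrary example, and the exact identifier string depends on this implementation's bit ordering.

```go
package main

import (
	"fmt"

	"github.com/bren2010/proquint"
)

func main() {
	addr := []byte{127, 0, 0, 1} // input length must be even

	id := proquint.Encode(addr)
	fmt.Println(id) // two pronounceable 5-letter groups separated by '-'

	ok, _ := proquint.IsProquint(id)
	fmt.Println(ok, proquint.Decode(id)) // true [127 0 0 1]
}
```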
123
vendor/github.com/bren2010/proquint/proquint.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
/*
|
||||||
|
Copyright (c) 2014 Brendan McMillion
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package proquint
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"strings"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
conse = [...]byte{'b', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n',
|
||||||
|
'p', 'r', 's', 't', 'v', 'z'}
|
||||||
|
vowse = [...]byte{'a', 'i', 'o', 'u'}
|
||||||
|
|
||||||
|
consd = map[byte] uint16 {
|
||||||
|
'b' : 0, 'd' : 1, 'f' : 2, 'g' : 3,
|
||||||
|
'h' : 4, 'j' : 5, 'k' : 6, 'l' : 7,
|
||||||
|
'm' : 8, 'n' : 9, 'p' : 10, 'r' : 11,
|
||||||
|
's' : 12, 't' : 13, 'v' : 14, 'z' : 15,
|
||||||
|
}
|
||||||
|
|
||||||
|
vowsd = map[byte] uint16 {
|
||||||
|
'a' : 0, 'i' : 1, 'o' : 2, 'u' : 3,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests if a given string is a Proquint identifier
|
||||||
|
*
|
||||||
|
* @param {string} str The candidate string.
|
||||||
|
*
|
||||||
|
* @return {bool} Whether or not it qualifies.
|
||||||
|
* @return {error} Error
|
||||||
|
*/
|
||||||
|
func IsProquint(str string) (bool, error) {
|
||||||
|
exp := "^([abdfghijklmnoprstuvz]{5}-)*[abdfghijklmnoprstuvz]{5}$"
|
||||||
|
ok, err := regexp.MatchString(exp, str)
|
||||||
|
|
||||||
|
return ok, err
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Encodes an arbitrary byte slice into an identifier.
|
||||||
|
*
|
||||||
|
* @param {[]byte} buf Slice of bytes to encode.
|
||||||
|
*
|
||||||
|
* @return {string} The given byte slice as an identifier.
|
||||||
|
*/
|
||||||
|
func Encode(buf []byte) string {
|
||||||
|
var out bytes.Buffer
|
||||||
|
|
||||||
|
for i := 0; i < len(buf); i = i + 2 {
|
||||||
|
var n uint16 = (uint16(buf[i]) * 256) + uint16(buf[i + 1])
|
||||||
|
|
||||||
|
var (
|
||||||
|
c1 = n & 0x0f
|
||||||
|
v1 = (n >> 4) & 0x03
|
||||||
|
c2 = (n >> 6) & 0x0f
|
||||||
|
v2 = (n >> 10) & 0x03
|
||||||
|
c3 = (n >> 12) & 0x0f
|
||||||
|
)
|
||||||
|
|
||||||
|
out.WriteByte(conse[c1])
|
||||||
|
out.WriteByte(vowse[v1])
|
||||||
|
out.WriteByte(conse[c2])
|
||||||
|
out.WriteByte(vowse[v2])
|
||||||
|
out.WriteByte(conse[c3])
|
||||||
|
|
||||||
|
if (i + 2) < len(buf) {
|
||||||
|
out.WriteByte('-')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decodes an identifier into its corresponding byte slice.
|
||||||
|
*
|
||||||
|
* @param {string} str Identifier to convert.
|
||||||
|
*
|
||||||
|
* @return {[]byte} The identifier as a byte slice.
|
||||||
|
*/
|
||||||
|
func Decode(str string) []byte {
|
||||||
|
var (
|
||||||
|
out bytes.Buffer
|
||||||
|
bits []string = strings.Split(str, "-")
|
||||||
|
)
|
||||||
|
|
||||||
|
for i := 0; i < len(bits); i++ {
|
||||||
|
var x uint16 = consd[bits[i][0]] +
|
||||||
|
(vowsd[bits[i][1]] << 4) +
|
||||||
|
(consd[bits[i][2]] << 6) +
|
||||||
|
(vowsd[bits[i][3]] << 10) +
|
||||||
|
(consd[bits[i][4]] << 12)
|
||||||
|
|
||||||
|
out.WriteByte(byte(x >> 8))
|
||||||
|
out.WriteByte(byte(x))
|
||||||
|
}
|
||||||
|
|
||||||
|
return out.Bytes()
|
||||||
|
}
|
540
vendor/github.com/btcsuite/btcd/btcec/signature.go
generated
vendored
Normal file
540
vendor/github.com/btcsuite/btcd/btcec/signature.go
generated
vendored
Normal file
@ -0,0 +1,540 @@
|
|||||||
|
// Copyright (c) 2013-2017 The btcsuite developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package btcec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errors returned by canonicalPadding.
|
||||||
|
var (
|
||||||
|
errNegativeValue = errors.New("value may be interpreted as negative")
|
||||||
|
errExcessivelyPaddedValue = errors.New("value is excessively padded")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Signature is a type representing an ecdsa signature.
|
||||||
|
type Signature struct {
|
||||||
|
R *big.Int
|
||||||
|
S *big.Int
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Used in RFC6979 implementation when testing the nonce for correctness
|
||||||
|
one = big.NewInt(1)
|
||||||
|
|
||||||
|
// oneInitializer is used to fill a byte slice with byte 0x01. It is provided
|
||||||
|
// here to avoid the need to create it multiple times.
|
||||||
|
oneInitializer = []byte{0x01}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Serialize returns the ECDSA signature in the more strict DER format. Note
|
||||||
|
// that the serialized bytes returned do not include the appended hash type
|
||||||
|
// used in Bitcoin signature scripts.
|
||||||
|
//
|
||||||
|
// encoding/asn1 is broken so we hand roll this output:
|
||||||
|
//
|
||||||
|
// 0x30 <length> 0x02 <length r> r 0x02 <length s> s
|
||||||
|
func (sig *Signature) Serialize() []byte {
|
||||||
|
// low 'S' malleability breaker
|
||||||
|
sigS := sig.S
|
||||||
|
if sigS.Cmp(S256().halfOrder) == 1 {
|
||||||
|
sigS = new(big.Int).Sub(S256().N, sigS)
|
||||||
|
}
|
||||||
|
// Ensure the encoded bytes for the r and s values are canonical and
|
||||||
|
// thus suitable for DER encoding.
|
||||||
|
rb := canonicalizeInt(sig.R)
|
||||||
|
sb := canonicalizeInt(sigS)
|
||||||
|
|
||||||
|
// total length of returned signature is 1 byte for each magic and
|
||||||
|
// length (6 total), plus lengths of r and s
|
||||||
|
length := 6 + len(rb) + len(sb)
|
||||||
|
b := make([]byte, length)
|
||||||
|
|
||||||
|
b[0] = 0x30
|
||||||
|
b[1] = byte(length - 2)
|
||||||
|
b[2] = 0x02
|
||||||
|
b[3] = byte(len(rb))
|
||||||
|
offset := copy(b[4:], rb) + 4
|
||||||
|
b[offset] = 0x02
|
||||||
|
b[offset+1] = byte(len(sb))
|
||||||
|
copy(b[offset+2:], sb)
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify calls ecdsa.Verify to verify the signature of hash using the public
|
||||||
|
// key. It returns true if the signature is valid, false otherwise.
|
||||||
|
func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {
|
||||||
|
return ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEqual compares this Signature instance to the one passed, returning true
|
||||||
|
// if both Signatures are equivalent. A signature is equivalent to another, if
|
||||||
|
// they both have the same scalar value for R and S.
|
||||||
|
func (sig *Signature) IsEqual(otherSig *Signature) bool {
|
||||||
|
return sig.R.Cmp(otherSig.R) == 0 &&
|
||||||
|
sig.S.Cmp(otherSig.S) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinSigLen is the minimum length of a DER encoded signature and is when both R
|
||||||
|
// and S are 1 byte each.
|
||||||
|
// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
|
||||||
|
const MinSigLen = 8
|
||||||
|
|
||||||
|
func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) {
|
||||||
|
// Originally this code used encoding/asn1 in order to parse the
|
||||||
|
// signature, but a number of problems were found with this approach.
|
||||||
|
// Despite the fact that signatures are stored as DER, the difference
|
||||||
|
// between go's idea of a bignum (and that they have sign) doesn't agree
|
||||||
|
// with the openssl one (where they do not). The above is true as of
|
||||||
|
// Go 1.1. In the end it was simpler to rewrite the code to explicitly
|
||||||
|
// understand the format which is this:
|
||||||
|
// 0x30 <length of whole message> <0x02> <length of R> <R> 0x2
|
||||||
|
// <length of S> <S>.
|
||||||
|
|
||||||
|
signature := &Signature{}
|
||||||
|
|
||||||
|
if len(sigStr) < MinSigLen {
|
||||||
|
return nil, errors.New("malformed signature: too short")
|
||||||
|
}
|
||||||
|
// 0x30
|
||||||
|
index := 0
|
||||||
|
if sigStr[index] != 0x30 {
|
||||||
|
return nil, errors.New("malformed signature: no header magic")
|
||||||
|
}
|
||||||
|
index++
|
||||||
|
// length of remaining message
|
||||||
|
siglen := sigStr[index]
|
||||||
|
index++
|
||||||
|
|
||||||
|
// siglen should be less than the entire message and greater than
|
||||||
|
// the minimal message size.
|
||||||
|
if int(siglen+2) > len(sigStr) || int(siglen+2) < MinSigLen {
|
||||||
|
return nil, errors.New("malformed signature: bad length")
|
||||||
|
}
|
||||||
|
// trim the slice we're working on so we only look at what matters.
|
||||||
|
sigStr = sigStr[:siglen+2]
|
||||||
|
|
||||||
|
// 0x02
|
||||||
|
if sigStr[index] != 0x02 {
|
||||||
|
return nil,
|
||||||
|
errors.New("malformed signature: no 1st int marker")
|
||||||
|
}
|
||||||
|
index++
|
||||||
|
|
||||||
|
// Length of signature R.
|
||||||
|
rLen := int(sigStr[index])
|
||||||
|
// must be positive, must be able to fit in another 0x2, <len> <s>
|
||||||
|
// hence the -3. We assume that the length must be at least one byte.
|
||||||
|
index++
|
||||||
|
if rLen <= 0 || rLen > len(sigStr)-index-3 {
|
||||||
|
return nil, errors.New("malformed signature: bogus R length")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then R itself.
|
||||||
|
rBytes := sigStr[index : index+rLen]
|
||||||
|
if der {
|
||||||
|
switch err := canonicalPadding(rBytes); err {
|
||||||
|
case errNegativeValue:
|
||||||
|
return nil, errors.New("signature R is negative")
|
||||||
|
case errExcessivelyPaddedValue:
|
||||||
|
return nil, errors.New("signature R is excessively padded")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
signature.R = new(big.Int).SetBytes(rBytes)
|
||||||
|
index += rLen
|
||||||
|
// 0x02. length already checked in previous if.
|
||||||
|
if sigStr[index] != 0x02 {
|
||||||
|
return nil, errors.New("malformed signature: no 2nd int marker")
|
||||||
|
}
|
||||||
|
index++
|
||||||
|
|
||||||
|
// Length of signature S.
|
||||||
|
sLen := int(sigStr[index])
|
||||||
|
index++
|
||||||
|
// S should be the rest of the string.
|
||||||
|
if sLen <= 0 || sLen > len(sigStr)-index {
|
||||||
|
return nil, errors.New("malformed signature: bogus S length")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then S itself.
|
||||||
|
sBytes := sigStr[index : index+sLen]
|
||||||
|
if der {
|
||||||
|
switch err := canonicalPadding(sBytes); err {
|
||||||
|
case errNegativeValue:
|
||||||
|
return nil, errors.New("signature S is negative")
|
||||||
|
case errExcessivelyPaddedValue:
|
||||||
|
return nil, errors.New("signature S is excessively padded")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
signature.S = new(big.Int).SetBytes(sBytes)
|
||||||
|
index += sLen
|
||||||
|
|
||||||
|
// sanity check length parsing
|
||||||
|
if index != len(sigStr) {
|
||||||
|
return nil, fmt.Errorf("malformed signature: bad final length %v != %v",
|
||||||
|
index, len(sigStr))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify also checks this, but we can be more sure that we parsed
|
||||||
|
// correctly if we verify here too.
|
||||||
|
// FWIW the ecdsa spec states that R and S must be | 1, N - 1 |
|
||||||
|
// but crypto/ecdsa only checks for Sign != 0. Mirror that.
|
||||||
|
if signature.R.Sign() != 1 {
|
||||||
|
return nil, errors.New("signature R isn't 1 or more")
|
||||||
|
}
|
||||||
|
if signature.S.Sign() != 1 {
|
||||||
|
return nil, errors.New("signature S isn't 1 or more")
|
||||||
|
}
|
||||||
|
if signature.R.Cmp(curve.Params().N) >= 0 {
|
||||||
|
return nil, errors.New("signature R is >= curve.N")
|
||||||
|
}
|
||||||
|
if signature.S.Cmp(curve.Params().N) >= 0 {
|
||||||
|
return nil, errors.New("signature S is >= curve.N")
|
||||||
|
}
|
||||||
|
|
||||||
|
return signature, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseSignature parses a signature in BER format for the curve type `curve'
|
||||||
|
// into a Signature type, perfoming some basic sanity checks. If parsing
|
||||||
|
// according to the more strict DER format is needed, use ParseDERSignature.
|
||||||
|
func ParseSignature(sigStr []byte, curve elliptic.Curve) (*Signature, error) {
|
||||||
|
return parseSig(sigStr, curve, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseDERSignature parses a signature in DER format for the curve type
|
||||||
|
// `curve` into a Signature type. If parsing according to the less strict
|
||||||
|
// BER format is needed, use ParseSignature.
|
||||||
|
func ParseDERSignature(sigStr []byte, curve elliptic.Curve) (*Signature, error) {
|
||||||
|
return parseSig(sigStr, curve, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// canonicalizeInt returns the bytes for the passed big integer adjusted as
|
||||||
|
// necessary to ensure that a big-endian encoded integer can't possibly be
|
||||||
|
// misinterpreted as a negative number. This can happen when the most
|
||||||
|
// significant bit is set, so it is padded by a leading zero byte in this case.
|
||||||
|
// Also, the returned bytes will have at least a single byte when the passed
|
||||||
|
// value is 0. This is required for DER encoding.
|
||||||
|
func canonicalizeInt(val *big.Int) []byte {
|
||||||
|
b := val.Bytes()
|
||||||
|
if len(b) == 0 {
|
||||||
|
b = []byte{0x00}
|
||||||
|
}
|
||||||
|
if b[0]&0x80 != 0 {
|
||||||
|
paddedBytes := make([]byte, len(b)+1)
|
||||||
|
copy(paddedBytes[1:], b)
|
||||||
|
b = paddedBytes
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// canonicalPadding checks whether a big-endian encoded integer could
|
||||||
|
// possibly be misinterpreted as a negative number (even though OpenSSL
|
||||||
|
// treats all numbers as unsigned), or if there is any unnecessary
|
||||||
|
// leading zero padding.
|
||||||
|
func canonicalPadding(b []byte) error {
|
||||||
|
switch {
|
||||||
|
case b[0]&0x80 == 0x80:
|
||||||
|
return errNegativeValue
|
||||||
|
case len(b) > 1 && b[0] == 0x00 && b[1]&0x80 != 0x80:
|
||||||
|
return errExcessivelyPaddedValue
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hashToInt converts a hash value to an integer. There is some disagreement
|
||||||
|
// about how this is done. [NSA] suggests that this is done in the obvious
|
||||||
|
// manner, but [SECG] truncates the hash to the bit-length of the curve order
|
||||||
|
// first. We follow [SECG] because that's what OpenSSL does. Additionally,
|
||||||
|
// OpenSSL right shifts excess bits from the number if the hash is too large
|
||||||
|
// and we mirror that too.
|
||||||
|
// This is borrowed from crypto/ecdsa.
|
||||||
|
func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
|
||||||
|
orderBits := c.Params().N.BitLen()
|
||||||
|
orderBytes := (orderBits + 7) / 8
|
||||||
|
if len(hash) > orderBytes {
|
||||||
|
hash = hash[:orderBytes]
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := new(big.Int).SetBytes(hash)
|
||||||
|
excess := len(hash)*8 - orderBits
|
||||||
|
if excess > 0 {
|
||||||
|
ret.Rsh(ret, uint(excess))
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// recoverKeyFromSignature recovers a public key from the signature "sig" on the
|
||||||
|
// given message hash "msg". Based on the algorithm found in section 5.1.5 of
|
||||||
|
// SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details
|
||||||
|
// in the inner loop in Step 1. The counter provided is actually the j parameter
|
||||||
|
// of the loop * 2 - on the first iteration of j we do the R case, else the -R
|
||||||
|
// case in step 1.6. This counter is used in the bitcoin compressed signature
|
||||||
|
// format and thus we match bitcoind's behaviour here.
|
||||||
|
func recoverKeyFromSignature(curve *KoblitzCurve, sig *Signature, msg []byte,
|
||||||
|
iter int, doChecks bool) (*PublicKey, error) {
|
||||||
|
// 1.1 x = (n * i) + r
|
||||||
|
Rx := new(big.Int).Mul(curve.Params().N,
|
||||||
|
new(big.Int).SetInt64(int64(iter/2)))
|
||||||
|
Rx.Add(Rx, sig.R)
|
||||||
|
if Rx.Cmp(curve.Params().P) != -1 {
|
||||||
|
return nil, errors.New("calculated Rx is larger than curve P")
|
||||||
|
}
|
||||||
|
|
||||||
|
// convert 02<Rx> to point R. (step 1.2 and 1.3). If we are on an odd
|
||||||
|
// iteration then 1.6 will be done with -R, so we calculate the other
|
||||||
|
// term when uncompressing the point.
|
||||||
|
Ry, err := decompressPoint(curve, Rx, iter%2 == 1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1.4 Check n*R is point at infinity
|
||||||
|
if doChecks {
|
||||||
|
nRx, nRy := curve.ScalarMult(Rx, Ry, curve.Params().N.Bytes())
|
||||||
|
if nRx.Sign() != 0 || nRy.Sign() != 0 {
|
||||||
|
return nil, errors.New("n*R does not equal the point at infinity")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1.5 calculate e from message using the same algorithm as ecdsa
|
||||||
|
// signature calculation.
|
||||||
|
e := hashToInt(msg, curve)
|
||||||
|
|
||||||
|
// Step 1.6.1:
|
||||||
|
// We calculate the two terms sR and eG separately multiplied by the
|
||||||
|
// inverse of r (from the signature). We then add them to calculate
|
||||||
|
// Q = r^-1(sR-eG)
|
||||||
|
invr := new(big.Int).ModInverse(sig.R, curve.Params().N)
|
||||||
|
|
||||||
|
// first term.
|
||||||
|
invrS := new(big.Int).Mul(invr, sig.S)
|
||||||
|
invrS.Mod(invrS, curve.Params().N)
|
||||||
|
sRx, sRy := curve.ScalarMult(Rx, Ry, invrS.Bytes())
|
||||||
|
|
||||||
|
// second term.
|
||||||
|
e.Neg(e)
|
||||||
|
e.Mod(e, curve.Params().N)
|
||||||
|
e.Mul(e, invr)
|
||||||
|
e.Mod(e, curve.Params().N)
|
||||||
|
minuseGx, minuseGy := curve.ScalarBaseMult(e.Bytes())
|
||||||
|
|
||||||
|
// TODO: this would be faster if we did a mult and add in one
|
||||||
|
// step to prevent the jacobian conversion back and forth.
|
||||||
|
Qx, Qy := curve.Add(sRx, sRy, minuseGx, minuseGy)
|
||||||
|
|
||||||
|
return &PublicKey{
|
||||||
|
Curve: curve,
|
||||||
|
X: Qx,
|
||||||
|
Y: Qy,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignCompact produces a compact signature of the data in hash with the given
|
||||||
|
// private key on the given koblitz curve. The isCompressed parameter should
|
||||||
|
// be used to detail if the given signature should reference a compressed
|
||||||
|
// public key or not. If successful the bytes of the compact signature will be
|
||||||
|
// returned in the format:
|
||||||
|
// <(byte of 27+public key solution)+4 if compressed >< padded bytes for signature R><padded bytes for signature S>
|
||||||
|
// where the R and S parameters are padde up to the bitlengh of the curve.
|
||||||
|
func SignCompact(curve *KoblitzCurve, key *PrivateKey,
|
||||||
|
hash []byte, isCompressedKey bool) ([]byte, error) {
|
||||||
|
sig, err := key.Sign(hash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// bitcoind checks the bit length of R and S here. The ecdsa signature
|
||||||
|
// algorithm returns R and S mod N therefore they will be the bitsize of
|
||||||
|
// the curve, and thus correctly sized.
|
||||||
|
for i := 0; i < (curve.H+1)*2; i++ {
|
||||||
|
pk, err := recoverKeyFromSignature(curve, sig, hash, i, true)
|
||||||
|
if err == nil && pk.X.Cmp(key.X) == 0 && pk.Y.Cmp(key.Y) == 0 {
|
||||||
|
result := make([]byte, 1, 2*curve.byteSize+1)
|
||||||
|
result[0] = 27 + byte(i)
|
||||||
|
if isCompressedKey {
|
||||||
|
result[0] += 4
|
||||||
|
}
|
||||||
|
// Not sure this needs rounding but safer to do so.
|
||||||
|
curvelen := (curve.BitSize + 7) / 8
|
||||||
|
|
||||||
|
// Pad R and S to curvelen if needed.
|
||||||
|
bytelen := (sig.R.BitLen() + 7) / 8
|
||||||
|
if bytelen < curvelen {
|
||||||
|
result = append(result,
|
||||||
|
make([]byte, curvelen-bytelen)...)
|
||||||
|
}
|
||||||
|
result = append(result, sig.R.Bytes()...)
|
||||||
|
|
||||||
|
bytelen = (sig.S.BitLen() + 7) / 8
|
||||||
|
if bytelen < curvelen {
|
||||||
|
result = append(result,
|
||||||
|
make([]byte, curvelen-bytelen)...)
|
||||||
|
}
|
||||||
|
result = append(result, sig.S.Bytes()...)
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("no valid solution for pubkey found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RecoverCompact verifies the compact signature "signature" of "hash" for the
|
||||||
|
// Koblitz curve in "curve". If the signature matches then the recovered public
|
||||||
|
// key will be returned as well as a boolen if the original key was compressed
|
||||||
|
// or not, else an error will be returned.
|
||||||
|
func RecoverCompact(curve *KoblitzCurve, signature,
|
||||||
|
hash []byte) (*PublicKey, bool, error) {
|
||||||
|
bitlen := (curve.BitSize + 7) / 8
|
||||||
|
if len(signature) != 1+bitlen*2 {
|
||||||
|
return nil, false, errors.New("invalid compact signature size")
|
||||||
|
}
|
||||||
|
|
||||||
|
iteration := int((signature[0] - 27) & ^byte(4))
|
||||||
|
|
||||||
|
// format is <header byte><bitlen R><bitlen S>
|
||||||
|
sig := &Signature{
|
||||||
|
R: new(big.Int).SetBytes(signature[1 : bitlen+1]),
|
||||||
|
S: new(big.Int).SetBytes(signature[bitlen+1:]),
|
||||||
|
}
|
||||||
|
// The iteration used here was encoded
|
||||||
|
key, err := recoverKeyFromSignature(curve, sig, hash, iteration, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, ((signature[0] - 27) & 4) == 4, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// signRFC6979 generates a deterministic ECDSA signature according to RFC 6979 and BIP 62.
|
||||||
|
func signRFC6979(privateKey *PrivateKey, hash []byte) (*Signature, error) {
|
||||||
|
|
||||||
|
privkey := privateKey.ToECDSA()
|
||||||
|
N := S256().N
|
||||||
|
halfOrder := S256().halfOrder
|
||||||
|
k := nonceRFC6979(privkey.D, hash)
|
||||||
|
inv := new(big.Int).ModInverse(k, N)
|
||||||
|
r, _ := privkey.Curve.ScalarBaseMult(k.Bytes())
|
||||||
|
r.Mod(r, N)
|
||||||
|
|
||||||
|
if r.Sign() == 0 {
|
||||||
|
return nil, errors.New("calculated R is zero")
|
||||||
|
}
|
||||||
|
|
||||||
|
e := hashToInt(hash, privkey.Curve)
|
||||||
|
s := new(big.Int).Mul(privkey.D, r)
|
||||||
|
s.Add(s, e)
|
||||||
|
s.Mul(s, inv)
|
||||||
|
s.Mod(s, N)
|
||||||
|
|
||||||
|
if s.Cmp(halfOrder) == 1 {
|
||||||
|
s.Sub(N, s)
|
||||||
|
}
|
||||||
|
if s.Sign() == 0 {
|
||||||
|
return nil, errors.New("calculated S is zero")
|
||||||
|
}
|
||||||
|
return &Signature{R: r, S: s}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nonceRFC6979 generates an ECDSA nonce (`k`) deterministically according to RFC 6979.
|
||||||
|
// It takes a 32-byte hash as an input and returns 32-byte nonce to be used in ECDSA algorithm.
|
||||||
|
func nonceRFC6979(privkey *big.Int, hash []byte) *big.Int {
|
||||||
|
|
||||||
|
curve := S256()
|
||||||
|
q := curve.Params().N
|
||||||
|
x := privkey
|
||||||
|
alg := sha256.New
|
||||||
|
|
||||||
|
qlen := q.BitLen()
|
||||||
|
holen := alg().Size()
|
||||||
|
rolen := (qlen + 7) >> 3
|
||||||
|
bx := append(int2octets(x, rolen), bits2octets(hash, curve, rolen)...)
|
||||||
|
|
||||||
|
// Step B
|
||||||
|
v := bytes.Repeat(oneInitializer, holen)
|
||||||
|
|
||||||
|
// Step C (Go zeroes the all allocated memory)
|
||||||
|
k := make([]byte, holen)
|
||||||
|
|
||||||
|
// Step D
|
||||||
|
k = mac(alg, k, append(append(v, 0x00), bx...))
|
||||||
|
|
||||||
|
// Step E
|
||||||
|
v = mac(alg, k, v)
|
||||||
|
|
||||||
|
// Step F
|
||||||
|
k = mac(alg, k, append(append(v, 0x01), bx...))
|
||||||
|
|
||||||
|
// Step G
|
||||||
|
v = mac(alg, k, v)
|
||||||
|
|
||||||
|
// Step H
|
||||||
|
for {
|
||||||
|
// Step H1
|
||||||
|
var t []byte
|
||||||
|
|
||||||
|
// Step H2
|
||||||
|
for len(t)*8 < qlen {
|
||||||
|
v = mac(alg, k, v)
|
||||||
|
t = append(t, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step H3
|
||||||
|
secret := hashToInt(t, curve)
|
||||||
|
if secret.Cmp(one) >= 0 && secret.Cmp(q) < 0 {
|
||||||
|
return secret
|
||||||
|
}
|
||||||
|
k = mac(alg, k, append(v, 0x00))
|
||||||
|
v = mac(alg, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mac returns an HMAC of the given key and message.
|
||||||
|
func mac(alg func() hash.Hash, k, m []byte) []byte {
|
||||||
|
h := hmac.New(alg, k)
|
||||||
|
h.Write(m)
|
||||||
|
return h.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc6979#section-2.3.3
|
||||||
|
func int2octets(v *big.Int, rolen int) []byte {
|
||||||
|
out := v.Bytes()
|
||||||
|
|
||||||
|
// left pad with zeros if it's too short
|
||||||
|
if len(out) < rolen {
|
||||||
|
out2 := make([]byte, rolen)
|
||||||
|
copy(out2[rolen-len(out):], out)
|
||||||
|
return out2
|
||||||
|
}
|
||||||
|
|
||||||
|
// drop most significant bytes if it's too long
|
||||||
|
if len(out) > rolen {
|
||||||
|
out2 := make([]byte, rolen)
|
||||||
|
copy(out2, out[len(out)-rolen:])
|
||||||
|
return out2
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc6979#section-2.3.4
|
||||||
|
func bits2octets(in []byte, curve elliptic.Curve, rolen int) []byte {
|
||||||
|
z1 := hashToInt(in, curve)
|
||||||
|
z2 := new(big.Int).Sub(z1, curve.Params().N)
|
||||||
|
if z2.Sign() < 0 {
|
||||||
|
return int2octets(z1, rolen)
|
||||||
|
}
|
||||||
|
return int2octets(z2, rolen)
|
||||||
|
}
|
22
vendor/github.com/cenkalti/backoff/.gitignore
generated
vendored
Normal file
22
vendor/github.com/cenkalti/backoff/.gitignore
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
10
vendor/github.com/cenkalti/backoff/.travis.yml
generated
vendored
Normal file
10
vendor/github.com/cenkalti/backoff/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.7
|
||||||
|
- 1.x
|
||||||
|
- tip
|
||||||
|
before_install:
|
||||||
|
- go get github.com/mattn/goveralls
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
script:
|
||||||
|
- $HOME/gopath/bin/goveralls -service=travis-ci
|
20
vendor/github.com/cenkalti/backoff/LICENSE
generated
vendored
Normal file
20
vendor/github.com/cenkalti/backoff/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Cenk Altı
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
this software and associated documentation files (the "Software"), to deal in
|
||||||
|
the Software without restriction, including without limitation the rights to
|
||||||
|
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
30
vendor/github.com/cenkalti/backoff/README.md
generated
vendored
Normal file
30
vendor/github.com/cenkalti/backoff/README.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
|
||||||
|
|
||||||
|
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
|
||||||
|
|
||||||
|
[Exponential backoff][exponential backoff wiki]
|
||||||
|
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
|
||||||
|
in order to gradually find an acceptable rate.
|
||||||
|
The retries exponentially increase and stop increasing when a certain threshold is met.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
See https://godoc.org/github.com/cenkalti/backoff#pkg-examples
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
* I would like to keep this library as small as possible.
|
||||||
|
* Please don't send a PR without opening an issue and discussing it first.
|
||||||
|
* If proposed change is not a common use case, I will probably not accept it.
|
||||||
|
|
||||||
|
[godoc]: https://godoc.org/github.com/cenkalti/backoff
|
||||||
|
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
|
||||||
|
[travis]: https://travis-ci.org/cenkalti/backoff
|
||||||
|
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
|
||||||
|
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
|
||||||
|
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
|
||||||
|
|
||||||
|
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
|
||||||
|
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
|
||||||
|
|
||||||
|
[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_
|
66
vendor/github.com/cenkalti/backoff/backoff.go
generated
vendored
Normal file
66
vendor/github.com/cenkalti/backoff/backoff.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
// Package backoff implements backoff algorithms for retrying operations.
|
||||||
|
//
|
||||||
|
// Use Retry function for retrying operations that may fail.
|
||||||
|
// If Retry does not meet your needs,
|
||||||
|
// copy/paste the function into your project and modify as you wish.
|
||||||
|
//
|
||||||
|
// There is also Ticker type similar to time.Ticker.
|
||||||
|
// You can use it if you need to work with channels.
|
||||||
|
//
|
||||||
|
// See Examples section below for usage examples.
|
||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// BackOff is a backoff policy for retrying an operation.
|
||||||
|
type BackOff interface {
|
||||||
|
// NextBackOff returns the duration to wait before retrying the operation,
|
||||||
|
// or backoff. Stop to indicate that no more retries should be made.
|
||||||
|
//
|
||||||
|
// Example usage:
|
||||||
|
//
|
||||||
|
// duration := backoff.NextBackOff();
|
||||||
|
// if (duration == backoff.Stop) {
|
||||||
|
// // Do not retry operation.
|
||||||
|
// } else {
|
||||||
|
// // Sleep for duration and retry operation.
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
NextBackOff() time.Duration
|
||||||
|
|
||||||
|
// Reset to initial state.
|
||||||
|
Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||||
|
const Stop time.Duration = -1
|
||||||
|
|
||||||
|
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||||
|
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||||
|
type ZeroBackOff struct{}
|
||||||
|
|
||||||
|
func (b *ZeroBackOff) Reset() {}
|
||||||
|
|
||||||
|
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||||
|
|
||||||
|
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||||
|
// NextBackOff(), meaning that the operation should never be retried.
|
||||||
|
type StopBackOff struct{}
|
||||||
|
|
||||||
|
func (b *StopBackOff) Reset() {}
|
||||||
|
|
||||||
|
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||||
|
|
||||||
|
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||||
|
// This is in contrast to an exponential backoff policy,
|
||||||
|
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||||
|
type ConstantBackOff struct {
|
||||||
|
Interval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *ConstantBackOff) Reset() {}
|
||||||
|
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||||
|
|
||||||
|
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||||
|
return &ConstantBackOff{Interval: d}
|
||||||
|
}
|
63
vendor/github.com/cenkalti/backoff/context.go
generated
vendored
Normal file
63
vendor/github.com/cenkalti/backoff/context.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BackOffContext is a backoff policy that stops retrying after the context
|
||||||
|
// is canceled.
|
||||||
|
type BackOffContext interface {
|
||||||
|
BackOff
|
||||||
|
Context() context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
type backOffContext struct {
|
||||||
|
BackOff
|
||||||
|
ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns a BackOffContext with context ctx
|
||||||
|
//
|
||||||
|
// ctx must not be nil
|
||||||
|
func WithContext(b BackOff, ctx context.Context) BackOffContext {
|
||||||
|
if ctx == nil {
|
||||||
|
panic("nil context")
|
||||||
|
}
|
||||||
|
|
||||||
|
if b, ok := b.(*backOffContext); ok {
|
||||||
|
return &backOffContext{
|
||||||
|
BackOff: b.BackOff,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &backOffContext{
|
||||||
|
BackOff: b,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureContext(b BackOff) BackOffContext {
|
||||||
|
if cb, ok := b.(BackOffContext); ok {
|
||||||
|
return cb
|
||||||
|
}
|
||||||
|
return WithContext(b, context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffContext) Context() context.Context {
|
||||||
|
return b.ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffContext) NextBackOff() time.Duration {
|
||||||
|
select {
|
||||||
|
case <-b.ctx.Done():
|
||||||
|
return Stop
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
next := b.BackOff.NextBackOff()
|
||||||
|
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
|
||||||
|
return Stop
|
||||||
|
}
|
||||||
|
return next
|
||||||
|
}
|
153
vendor/github.com/cenkalti/backoff/exponential.go
generated
vendored
Normal file
153
vendor/github.com/cenkalti/backoff/exponential.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||||
|
period for each retry attempt using a randomization function that grows exponentially.
|
||||||
|
|
||||||
|
NextBackOff() is calculated using the following formula:
|
||||||
|
|
||||||
|
randomized interval =
|
||||||
|
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||||
|
|
||||||
|
In other words NextBackOff() will range between the randomization factor
|
||||||
|
percentage below and above the retry interval.
|
||||||
|
|
||||||
|
For example, given the following parameters:
|
||||||
|
|
||||||
|
RetryInterval = 2
|
||||||
|
RandomizationFactor = 0.5
|
||||||
|
Multiplier = 2
|
||||||
|
|
||||||
|
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||||
|
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||||
|
|
||||||
|
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||||
|
|
||||||
|
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||||
|
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||||
|
|
||||||
|
The elapsed time can be reset by calling Reset().
|
||||||
|
|
||||||
|
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||||
|
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||||
|
|
||||||
|
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||||
|
|
||||||
|
1 0.5 [0.25, 0.75]
|
||||||
|
2 0.75 [0.375, 1.125]
|
||||||
|
3 1.125 [0.562, 1.687]
|
||||||
|
4 1.687 [0.8435, 2.53]
|
||||||
|
5 2.53 [1.265, 3.795]
|
||||||
|
6 3.795 [1.897, 5.692]
|
||||||
|
7 5.692 [2.846, 8.538]
|
||||||
|
8 8.538 [4.269, 12.807]
|
||||||
|
9 12.807 [6.403, 19.210]
|
||||||
|
10 19.210 backoff.Stop
|
||||||
|
|
||||||
|
Note: Implementation is not thread-safe.
|
||||||
|
*/
|
||||||
|
type ExponentialBackOff struct {
|
||||||
|
InitialInterval time.Duration
|
||||||
|
RandomizationFactor float64
|
||||||
|
Multiplier float64
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// After MaxElapsedTime the ExponentialBackOff stops.
|
||||||
|
// It never stops if MaxElapsedTime == 0.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
Clock Clock
|
||||||
|
|
||||||
|
currentInterval time.Duration
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clock is an interface that returns current time for BackOff.
|
||||||
|
type Clock interface {
|
||||||
|
Now() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default values for ExponentialBackOff.
|
||||||
|
const (
|
||||||
|
DefaultInitialInterval = 500 * time.Millisecond
|
||||||
|
DefaultRandomizationFactor = 0.5
|
||||||
|
DefaultMultiplier = 1.5
|
||||||
|
DefaultMaxInterval = 60 * time.Second
|
||||||
|
DefaultMaxElapsedTime = 15 * time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||||
|
func NewExponentialBackOff() *ExponentialBackOff {
|
||||||
|
b := &ExponentialBackOff{
|
||||||
|
InitialInterval: DefaultInitialInterval,
|
||||||
|
RandomizationFactor: DefaultRandomizationFactor,
|
||||||
|
Multiplier: DefaultMultiplier,
|
||||||
|
MaxInterval: DefaultMaxInterval,
|
||||||
|
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||||
|
Clock: SystemClock,
|
||||||
|
}
|
||||||
|
b.Reset()
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
type systemClock struct{}
|
||||||
|
|
||||||
|
func (t systemClock) Now() time.Time {
|
||||||
|
return time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SystemClock implements Clock interface that uses time.Now().
|
||||||
|
var SystemClock = systemClock{}
|
||||||
|
|
||||||
|
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||||
|
func (b *ExponentialBackOff) Reset() {
|
||||||
|
b.currentInterval = b.InitialInterval
|
||||||
|
b.startTime = b.Clock.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextBackOff calculates the next backoff interval using the formula:
|
||||||
|
// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
|
||||||
|
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||||
|
// Make sure we have not gone over the maximum elapsed time.
|
||||||
|
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
|
||||||
|
return Stop
|
||||||
|
}
|
||||||
|
defer b.incrementCurrentInterval()
|
||||||
|
return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||||
|
// is created and is reset when Reset() is called.
|
||||||
|
//
|
||||||
|
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||||
|
// safe to call even while the backoff policy is used by a running
|
||||||
|
// ticker.
|
||||||
|
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||||
|
return b.Clock.Now().Sub(b.startTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increments the current interval by multiplying it with the multiplier.
|
||||||
|
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||||
|
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||||
|
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||||
|
b.currentInterval = b.MaxInterval
|
||||||
|
} else {
|
||||||
|
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns a random value from the following interval:
|
||||||
|
// [randomizationFactor * currentInterval, randomizationFactor * currentInterval].
|
||||||
|
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||||
|
var delta = randomizationFactor * float64(currentInterval)
|
||||||
|
var minInterval = float64(currentInterval) - delta
|
||||||
|
var maxInterval = float64(currentInterval) + delta
|
||||||
|
|
||||||
|
// Get a random value from the range [minInterval, maxInterval].
|
||||||
|
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||||
|
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||||
|
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||||
|
}
|
3
vendor/github.com/cenkalti/backoff/go.mod
generated
vendored
Normal file
3
vendor/github.com/cenkalti/backoff/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
module github.com/cenkalti/backoff/v3
|
||||||
|
|
||||||
|
go 1.12
|
82
vendor/github.com/cenkalti/backoff/retry.go
generated
vendored
Normal file
82
vendor/github.com/cenkalti/backoff/retry.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// An Operation is executing by Retry() or RetryNotify().
|
||||||
|
// The operation will be retried using a backoff policy if it returns an error.
|
||||||
|
type Operation func() error
|
||||||
|
|
||||||
|
// Notify is a notify-on-error function. It receives an operation error and
|
||||||
|
// backoff delay if the operation failed (with an error).
|
||||||
|
//
|
||||||
|
// NOTE that if the backoff policy stated to stop retrying,
|
||||||
|
// the notify function isn't called.
|
||||||
|
type Notify func(error, time.Duration)
|
||||||
|
|
||||||
|
// Retry the operation o until it does not return error or BackOff stops.
|
||||||
|
// o is guaranteed to be run at least once.
|
||||||
|
//
|
||||||
|
// If o returns a *PermanentError, the operation is not retried, and the
|
||||||
|
// wrapped error is returned.
|
||||||
|
//
|
||||||
|
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||||
|
// failed operation returns.
|
||||||
|
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
|
||||||
|
|
||||||
|
// RetryNotify calls notify function with the error and wait duration
|
||||||
|
// for each failed attempt before sleep.
|
||||||
|
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||||
|
var err error
|
||||||
|
var next time.Duration
|
||||||
|
var t *time.Timer
|
||||||
|
|
||||||
|
cb := ensureContext(b)
|
||||||
|
|
||||||
|
b.Reset()
|
||||||
|
for {
|
||||||
|
if err = operation(); err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if permanent, ok := err.(*PermanentError); ok {
|
||||||
|
return permanent.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
if next = cb.NextBackOff(); next == Stop {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if notify != nil {
|
||||||
|
notify(err, next)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t == nil {
|
||||||
|
t = time.NewTimer(next)
|
||||||
|
defer t.Stop()
|
||||||
|
} else {
|
||||||
|
t.Reset(next)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-cb.Context().Done():
|
||||||
|
return err
|
||||||
|
case <-t.C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PermanentError signals that the operation should not be retried.
|
||||||
|
type PermanentError struct {
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *PermanentError) Error() string {
|
||||||
|
return e.Err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Permanent wraps the given err in a *PermanentError.
|
||||||
|
func Permanent(err error) *PermanentError {
|
||||||
|
return &PermanentError{
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
82
vendor/github.com/cenkalti/backoff/ticker.go
generated
vendored
Normal file
82
vendor/github.com/cenkalti/backoff/ticker.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
|
||||||
|
//
|
||||||
|
// Ticks will continue to arrive when the previous operation is still running,
|
||||||
|
// so operations that take a while to fail could run in quick succession.
|
||||||
|
type Ticker struct {
|
||||||
|
C <-chan time.Time
|
||||||
|
c chan time.Time
|
||||||
|
b BackOffContext
|
||||||
|
stop chan struct{}
|
||||||
|
stopOnce sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTicker returns a new Ticker containing a channel that will send
|
||||||
|
// the time at times specified by the BackOff argument. Ticker is
|
||||||
|
// guaranteed to tick at least once. The channel is closed when Stop
|
||||||
|
// method is called or BackOff stops. It is not safe to manipulate the
|
||||||
|
// provided backoff policy (notably calling NextBackOff or Reset)
|
||||||
|
// while the ticker is running.
|
||||||
|
func NewTicker(b BackOff) *Ticker {
|
||||||
|
c := make(chan time.Time)
|
||||||
|
t := &Ticker{
|
||||||
|
C: c,
|
||||||
|
c: c,
|
||||||
|
b: ensureContext(b),
|
||||||
|
stop: make(chan struct{}),
|
||||||
|
}
|
||||||
|
t.b.Reset()
|
||||||
|
go t.run()
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop turns off a ticker. After Stop, no more ticks will be sent.
|
||||||
|
func (t *Ticker) Stop() {
|
||||||
|
t.stopOnce.Do(func() { close(t.stop) })
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Ticker) run() {
|
||||||
|
c := t.c
|
||||||
|
defer close(c)
|
||||||
|
|
||||||
|
// Ticker is guaranteed to tick at least once.
|
||||||
|
afterC := t.send(time.Now())
|
||||||
|
|
||||||
|
for {
|
||||||
|
if afterC == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case tick := <-afterC:
|
||||||
|
afterC = t.send(tick)
|
||||||
|
case <-t.stop:
|
||||||
|
t.c = nil // Prevent future ticks from being sent to the channel.
|
||||||
|
return
|
||||||
|
case <-t.b.Context().Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Ticker) send(tick time.Time) <-chan time.Time {
|
||||||
|
select {
|
||||||
|
case t.c <- tick:
|
||||||
|
case <-t.stop:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
next := t.b.NextBackOff()
|
||||||
|
if next == Stop {
|
||||||
|
t.Stop()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return time.After(next)
|
||||||
|
}
|
35
vendor/github.com/cenkalti/backoff/tries.go
generated
vendored
Normal file
35
vendor/github.com/cenkalti/backoff/tries.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
package backoff
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
/*
|
||||||
|
WithMaxRetries creates a wrapper around another BackOff, which will
|
||||||
|
return Stop if NextBackOff() has been called too many times since
|
||||||
|
the last time Reset() was called
|
||||||
|
|
||||||
|
Note: Implementation is not thread-safe.
|
||||||
|
*/
|
||||||
|
func WithMaxRetries(b BackOff, max uint64) BackOff {
|
||||||
|
return &backOffTries{delegate: b, maxTries: max}
|
||||||
|
}
|
||||||
|
|
||||||
|
type backOffTries struct {
|
||||||
|
delegate BackOff
|
||||||
|
maxTries uint64
|
||||||
|
numTries uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffTries) NextBackOff() time.Duration {
|
||||||
|
if b.maxTries > 0 {
|
||||||
|
if b.maxTries <= b.numTries {
|
||||||
|
return Stop
|
||||||
|
}
|
||||||
|
b.numTries++
|
||||||
|
}
|
||||||
|
return b.delegate.NextBackOff()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *backOffTries) Reset() {
|
||||||
|
b.numTries = 0
|
||||||
|
b.delegate.Reset()
|
||||||
|
}
|
22
vendor/github.com/cheekybits/genny/LICENSE
generated
vendored
Normal file
22
vendor/github.com/cheekybits/genny/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 cheekybits
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
2
vendor/github.com/cheekybits/genny/generic/doc.go
generated
vendored
Normal file
2
vendor/github.com/cheekybits/genny/generic/doc.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
// Package generic contains the generic marker types.
|
||||||
|
package generic
|
13
vendor/github.com/cheekybits/genny/generic/generic.go
generated
vendored
Normal file
13
vendor/github.com/cheekybits/genny/generic/generic.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
package generic
|
||||||
|
|
||||||
|
// Type is the placeholder type that indicates a generic value.
|
||||||
|
// When genny is executed, variables of this type will be replaced with
|
||||||
|
// references to the specific types.
|
||||||
|
// var GenericType generic.Type
|
||||||
|
type Type interface{}
|
||||||
|
|
||||||
|
// Number is the placehoder type that indiccates a generic numerical value.
|
||||||
|
// When genny is executed, variables of this type will be replaced with
|
||||||
|
// references to the specific types.
|
||||||
|
// var GenericType generic.Number
|
||||||
|
type Number float64
|
202
vendor/github.com/coreos/go-semver/LICENSE
generated
vendored
Normal file
202
vendor/github.com/coreos/go-semver/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
5 vendor/github.com/coreos/go-semver/NOTICE generated vendored Normal file
@@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
296 vendor/github.com/coreos/go-semver/semver/semver.go generated vendored Normal file
@@ -0,0 +1,296 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Semantic Versions http://semver.org
package semver

import (
    "bytes"
    "errors"
    "fmt"
    "regexp"
    "strconv"
    "strings"
)

type Version struct {
    Major      int64
    Minor      int64
    Patch      int64
    PreRelease PreRelease
    Metadata   string
}

type PreRelease string

func splitOff(input *string, delim string) (val string) {
    parts := strings.SplitN(*input, delim, 2)

    if len(parts) == 2 {
        *input = parts[0]
        val = parts[1]
    }

    return val
}

func New(version string) *Version {
    return Must(NewVersion(version))
}

func NewVersion(version string) (*Version, error) {
    v := Version{}

    if err := v.Set(version); err != nil {
        return nil, err
    }

    return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
    if err != nil {
        panic(err)
    }
    return v
}

// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
    metadata := splitOff(&version, "+")
    preRelease := PreRelease(splitOff(&version, "-"))
    dotParts := strings.SplitN(version, ".", 3)

    if len(dotParts) != 3 {
        return fmt.Errorf("%s is not in dotted-tri format", version)
    }

    if err := validateIdentifier(string(preRelease)); err != nil {
        return fmt.Errorf("failed to validate pre-release: %v", err)
    }

    if err := validateIdentifier(metadata); err != nil {
        return fmt.Errorf("failed to validate metadata: %v", err)
    }

    parsed := make([]int64, 3, 3)

    for i, v := range dotParts[:3] {
        val, err := strconv.ParseInt(v, 10, 64)
        parsed[i] = val
        if err != nil {
            return err
        }
    }

    v.Metadata = metadata
    v.PreRelease = preRelease
    v.Major = parsed[0]
    v.Minor = parsed[1]
    v.Patch = parsed[2]
    return nil
}

func (v Version) String() string {
    var buffer bytes.Buffer

    fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

    if v.PreRelease != "" {
        fmt.Fprintf(&buffer, "-%s", v.PreRelease)
    }

    if v.Metadata != "" {
        fmt.Fprintf(&buffer, "+%s", v.Metadata)
    }

    return buffer.String()
}

func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
    var data string
    if err := unmarshal(&data); err != nil {
        return err
    }
    return v.Set(data)
}

func (v Version) MarshalJSON() ([]byte, error) {
    return []byte(`"` + v.String() + `"`), nil
}

func (v *Version) UnmarshalJSON(data []byte) error {
    l := len(data)
    if l == 0 || string(data) == `""` {
        return nil
    }
    if l < 2 || data[0] != '"' || data[l-1] != '"' {
        return errors.New("invalid semver string")
    }
    return v.Set(string(data[1 : l-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
    if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
        return cmp
    }
    return preReleaseCompare(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
    return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
    return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
    return []int64{v.Major, v.Minor, v.Patch}
}

func (p PreRelease) Slice() []string {
    preRelease := string(p)
    return strings.Split(preRelease, ".")
}

func preReleaseCompare(versionA Version, versionB Version) int {
    a := versionA.PreRelease
    b := versionB.PreRelease

    /* Handle the case where if two versions are otherwise equal it is the
     * one without a PreRelease that is greater */
    if len(a) == 0 && (len(b) > 0) {
        return 1
    } else if len(b) == 0 && (len(a) > 0) {
        return -1
    }

    // If there is a prerelease, check and compare each part.
    return recursivePreReleaseCompare(a.Slice(), b.Slice())
}

func recursiveCompare(versionA []int64, versionB []int64) int {
    if len(versionA) == 0 {
        return 0
    }

    a := versionA[0]
    b := versionB[0]

    if a > b {
        return 1
    } else if a < b {
        return -1
    }

    return recursiveCompare(versionA[1:], versionB[1:])
}

func recursivePreReleaseCompare(versionA []string, versionB []string) int {
    // A larger set of pre-release fields has a higher precedence than a smaller set,
    // if all of the preceding identifiers are equal.
    if len(versionA) == 0 {
        if len(versionB) > 0 {
            return -1
        }
        return 0
    } else if len(versionB) == 0 {
        // We're longer than versionB so return 1.
        return 1
    }

    a := versionA[0]
    b := versionB[0]

    aInt := false
    bInt := false

    aI, err := strconv.Atoi(versionA[0])
    if err == nil {
        aInt = true
    }

    bI, err := strconv.Atoi(versionB[0])
    if err == nil {
        bInt = true
    }

    // Numeric identifiers always have lower precedence than non-numeric identifiers.
    if aInt && !bInt {
        return -1
    } else if !aInt && bInt {
        return 1
    }

    // Handle Integer Comparison
    if aInt && bInt {
        if aI > bI {
            return 1
        } else if aI < bI {
            return -1
        }
    }

    // Handle String Comparison
    if a > b {
        return 1
    } else if a < b {
        return -1
    }

    return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}

// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
    v.Major += 1
    v.Minor = 0
    v.Patch = 0
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
    v.Minor += 1
    v.Patch = 0
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
    v.Patch += 1
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
    if id != "" && !reIdentifier.MatchString(id) {
        return fmt.Errorf("%s is not a valid semver identifier", id)
    }
    return nil
}

// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
38 vendor/github.com/coreos/go-semver/semver/sort.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semver

import (
    "sort"
)

type Versions []*Version

func (s Versions) Len() int {
    return len(s)
}

func (s Versions) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s Versions) Less(i, j int) bool {
    return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
    sort.Sort(Versions(versions))
}
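The two go-semver files above come in as a transitive dependency of the vendor update. As a quick orientation, here is a minimal usage sketch of the parsing, comparison, and sorting entry points; the version strings are made up purely for illustration:

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    // New/NewVersion parse a dotted-tri version with optional pre-release and metadata.
    a := semver.New("1.2.3-beta.1+build.7")
    b := semver.New("1.2.3")

    // A pre-release version has lower precedence than the corresponding release.
    fmt.Println(a.LessThan(*b)) // true

    // Sort orders a slice of *Version in ascending precedence.
    versions := []*semver.Version{b, a}
    semver.Sort(versions)
    fmt.Println(versions[0], versions[1]) // 1.2.3-beta.1+build.7 1.2.3
}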
22 vendor/github.com/cskr/pubsub/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2013, Chandra Sekar S
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13 vendor/github.com/cskr/pubsub/README.md generated vendored Normal file
@@ -0,0 +1,13 @@
[![GoDoc](https://godoc.org/github.com/cskr/pubsub?status.svg)](https://godoc.org/github.com/cskr/pubsub)

Package pubsub implements a simple multi-topic pub-sub library.

Install pubsub with,

    go get github.com/cskr/pubsub

This repository is a go module and contains tagged releases. Please pin a
version for production use.

Use of this module is governed by a BSD-style license that can be found in the
[LICENSE](LICENSE) file.
3 vendor/github.com/cskr/pubsub/go.mod generated vendored Normal file
@@ -0,0 +1,3 @@
module github.com/cskr/pubsub

go 1.12
267 vendor/github.com/cskr/pubsub/pubsub.go generated vendored Normal file
@@ -0,0 +1,267 @@
// Copyright 2013, Chandra Sekar S. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pubsub implements a simple multi-topic pub-sub
// library.
//
// Topics must be strings and messages of any type can be
// published. A topic can have any number of subcribers and
// all of them receive messages published on the topic.
package pubsub

type operation int

const (
    sub operation = iota
    subOnce
    subOnceEach
    pub
    tryPub
    unsub
    unsubAll
    closeTopic
    shutdown
)

// PubSub is a collection of topics.
type PubSub struct {
    cmdChan  chan cmd
    capacity int
}

type cmd struct {
    op     operation
    topics []string
    ch     chan interface{}
    msg    interface{}
}

// New creates a new PubSub and starts a goroutine for handling operations.
// The capacity of the channels created by Sub and SubOnce will be as specified.
func New(capacity int) *PubSub {
    ps := &PubSub{make(chan cmd), capacity}
    go ps.start()
    return ps
}

// Sub returns a channel on which messages published on any of
// the specified topics can be received.
func (ps *PubSub) Sub(topics ...string) chan interface{} {
    return ps.sub(sub, topics...)
}

// SubOnce is similar to Sub, but only the first message published, after subscription,
// on any of the specified topics can be received.
func (ps *PubSub) SubOnce(topics ...string) chan interface{} {
    return ps.sub(subOnce, topics...)
}

// SubOnceEach returns a channel on which callers receive, at most, one message
// for each topic.
func (ps *PubSub) SubOnceEach(topics ...string) chan interface{} {
    return ps.sub(subOnceEach, topics...)
}

func (ps *PubSub) sub(op operation, topics ...string) chan interface{} {
    ch := make(chan interface{}, ps.capacity)
    ps.cmdChan <- cmd{op: op, topics: topics, ch: ch}
    return ch
}

// AddSub adds subscriptions to an existing channel.
func (ps *PubSub) AddSub(ch chan interface{}, topics ...string) {
    ps.cmdChan <- cmd{op: sub, topics: topics, ch: ch}
}

// AddSubOnceEach adds subscriptions to an existing channel with SubOnceEach
// behavior.
func (ps *PubSub) AddSubOnceEach(ch chan interface{}, topics ...string) {
    ps.cmdChan <- cmd{op: subOnceEach, topics: topics, ch: ch}
}

// Pub publishes the given message to all subscribers of
// the specified topics.
func (ps *PubSub) Pub(msg interface{}, topics ...string) {
    ps.cmdChan <- cmd{op: pub, topics: topics, msg: msg}
}

// TryPub publishes the given message to all subscribers of
// the specified topics if the topic has buffer space.
func (ps *PubSub) TryPub(msg interface{}, topics ...string) {
    ps.cmdChan <- cmd{op: tryPub, topics: topics, msg: msg}
}

// Unsub unsubscribes the given channel from the specified
// topics. If no topic is specified, it is unsubscribed
// from all topics.
//
// Unsub must be called from a goroutine that is different from the subscriber.
// The subscriber must consume messages from the channel until it reaches the
// end. Not doing so can result in a deadlock.
func (ps *PubSub) Unsub(ch chan interface{}, topics ...string) {
    if len(topics) == 0 {
        ps.cmdChan <- cmd{op: unsubAll, ch: ch}
        return
    }

    ps.cmdChan <- cmd{op: unsub, topics: topics, ch: ch}
}

// Close closes all channels currently subscribed to the specified topics.
// If a channel is subscribed to multiple topics, some of which is
// not specified, it is not closed.
func (ps *PubSub) Close(topics ...string) {
    ps.cmdChan <- cmd{op: closeTopic, topics: topics}
}

// Shutdown closes all subscribed channels and terminates the goroutine.
func (ps *PubSub) Shutdown() {
    ps.cmdChan <- cmd{op: shutdown}
}

func (ps *PubSub) start() {
    reg := registry{
        topics:    make(map[string]map[chan interface{}]subType),
        revTopics: make(map[chan interface{}]map[string]bool),
    }

loop:
    for cmd := range ps.cmdChan {
        if cmd.topics == nil {
            switch cmd.op {
            case unsubAll:
                reg.removeChannel(cmd.ch)

            case shutdown:
                break loop
            }

            continue loop
        }

        for _, topic := range cmd.topics {
            switch cmd.op {
            case sub:
                reg.add(topic, cmd.ch, normal)

            case subOnce:
                reg.add(topic, cmd.ch, onceAny)

            case subOnceEach:
                reg.add(topic, cmd.ch, onceEach)

            case tryPub:
                reg.sendNoWait(topic, cmd.msg)

            case pub:
                reg.send(topic, cmd.msg)

            case unsub:
                reg.remove(topic, cmd.ch)

            case closeTopic:
                reg.removeTopic(topic)
            }
        }
    }

    for topic, chans := range reg.topics {
        for ch := range chans {
            reg.remove(topic, ch)
        }
    }
}

// registry maintains the current subscription state. It's not
// safe to access a registry from multiple goroutines simultaneously.
type registry struct {
    topics    map[string]map[chan interface{}]subType
    revTopics map[chan interface{}]map[string]bool
}

type subType int

const (
    onceAny subType = iota
    onceEach
    normal
)

func (reg *registry) add(topic string, ch chan interface{}, st subType) {
    if reg.topics[topic] == nil {
        reg.topics[topic] = make(map[chan interface{}]subType)
    }
    reg.topics[topic][ch] = st

    if reg.revTopics[ch] == nil {
        reg.revTopics[ch] = make(map[string]bool)
    }
    reg.revTopics[ch][topic] = true
}

func (reg *registry) send(topic string, msg interface{}) {
    for ch, st := range reg.topics[topic] {
        ch <- msg
        switch st {
        case onceAny:
            for topic := range reg.revTopics[ch] {
                reg.remove(topic, ch)
            }
        case onceEach:
            reg.remove(topic, ch)
        }
    }
}

func (reg *registry) sendNoWait(topic string, msg interface{}) {
    for ch, st := range reg.topics[topic] {
        select {
        case ch <- msg:
            switch st {
            case onceAny:
                for topic := range reg.revTopics[ch] {
                    reg.remove(topic, ch)
                }
            case onceEach:
                reg.remove(topic, ch)
            }
        default:
        }

    }
}

func (reg *registry) removeTopic(topic string) {
    for ch := range reg.topics[topic] {
        reg.remove(topic, ch)
    }
}

func (reg *registry) removeChannel(ch chan interface{}) {
    for topic := range reg.revTopics[ch] {
        reg.remove(topic, ch)
    }
}

func (reg *registry) remove(topic string, ch chan interface{}) {
    if _, ok := reg.topics[topic]; !ok {
        return
    }

    if _, ok := reg.topics[topic][ch]; !ok {
        return
    }

    delete(reg.topics[topic], ch)
    delete(reg.revTopics[ch], topic)

    if len(reg.topics[topic]) == 0 {
        delete(reg.topics, topic)
    }

    if len(reg.revTopics[ch]) == 0 {
        close(ch)
        delete(reg.revTopics, ch)
    }
}
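To make the vendored pubsub API above easier to follow, here is a small, self-contained usage sketch; the topic name and message are illustrative only, and it respects the package's own caveat that Unsub must be called from a goroutine other than the subscriber:

package main

import (
    "fmt"

    "github.com/cskr/pubsub"
)

func main() {
    // New(1) buffers one pending message per subscriber channel.
    ps := pubsub.New(1)

    // Sub returns a receive channel for the listed topics.
    ch := ps.Sub("blocks")

    // Pub fans the message out to every subscriber of the topic.
    ps.Pub("block 42", "blocks")
    fmt.Println(<-ch) // block 42

    // Unsubscribe from a separate goroutine, then drain until the channel closes.
    go ps.Unsub(ch, "blocks")
    for range ch {
    }

    ps.Shutdown()
}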
15 vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
ISC License

Copyright (c) 2012-2016 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
145 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4

package spew

import (
    "reflect"
    "unsafe"
)

const (
    // UnsafeDisabled is a build-time constant which specifies whether or
    // not access to the unsafe package is available.
    UnsafeDisabled = false

    // ptrSize is the size of a pointer on the current arch.
    ptrSize = unsafe.Sizeof((*byte)(nil))
)

type flag uintptr

var (
    // flagRO indicates whether the value field of a reflect.Value
    // is read-only.
    flagRO flag

    // flagAddr indicates whether the address of the reflect.Value's
    // value may be taken.
    flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
    ro, addr flag
}{{
    // From Go 1.4 to 1.5
    ro:   1 << 5,
    addr: 1 << 7,
}, {
    // Up to Go tip.
    ro:   1<<5 | 1<<6,
    addr: 1 << 8,
}}

var flagValOffset = func() uintptr {
    field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
    if !ok {
        panic("reflect.Value has no flag field")
    }
    return field.Offset
}()

// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
    return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}

// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
    if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
        return v
    }
    flagFieldPtr := flagField(&v)
    *flagFieldPtr &^= flagRO
    *flagFieldPtr |= flagAddr
    return v
}

// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
    field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
    if !ok {
        panic("reflect.Value has no flag field")
    }
    if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
        panic("reflect.Value flag field has changed kind")
    }
    type t0 int
    var t struct {
        A t0
        // t0 will have flagEmbedRO set.
        t0
        // a will have flagStickyRO set
        a t0
    }
    vA := reflect.ValueOf(t).FieldByName("A")
    va := reflect.ValueOf(t).FieldByName("a")
    vt0 := reflect.ValueOf(t).FieldByName("t0")

    // Infer flagRO from the difference between the flags
    // for the (otherwise identical) fields in t.
    flagPublic := *flagField(&vA)
    flagWithRO := *flagField(&va) | *flagField(&vt0)
    flagRO = flagPublic ^ flagWithRO

    // Infer flagAddr from the difference between a value
    // taken from a pointer and not.
    vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
    flagNoPtr := *flagField(&vA)
    flagPtr := *flagField(&vPtrA)
    flagAddr = flagNoPtr ^ flagPtr

    // Check that the inferred flags tally with one of the known versions.
    for _, f := range okFlags {
        if flagRO == f.ro && flagAddr == f.addr {
            return
        }
    }
    panic("reflect.Value read-only flag has changed semantics")
}
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4

package spew

import "reflect"

const (
    // UnsafeDisabled is a build-time constant which specifies whether or
    // not access to the unsafe package is available.
    UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
    return v
}
341 vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file
@@ -0,0 +1,341 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
    "bytes"
    "fmt"
    "io"
    "reflect"
    "sort"
    "strconv"
)

// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
    panicBytes            = []byte("(PANIC=")
    plusBytes             = []byte("+")
    iBytes                = []byte("i")
    trueBytes             = []byte("true")
    falseBytes            = []byte("false")
    interfaceBytes        = []byte("(interface {})")
    commaNewlineBytes     = []byte(",\n")
    newlineBytes          = []byte("\n")
    openBraceBytes        = []byte("{")
    openBraceNewlineBytes = []byte("{\n")
    closeBraceBytes       = []byte("}")
    asteriskBytes         = []byte("*")
    colonBytes            = []byte(":")
    colonSpaceBytes       = []byte(": ")
    openParenBytes        = []byte("(")
    closeParenBytes       = []byte(")")
    spaceBytes            = []byte(" ")
    pointerChainBytes     = []byte("->")
    nilAngleBytes         = []byte("<nil>")
    maxNewlineBytes       = []byte("<max depth reached>\n")
    maxShortBytes         = []byte("<max>")
    circularBytes         = []byte("<already shown>")
    circularShortBytes    = []byte("<shown>")
    invalidAngleBytes     = []byte("<invalid>")
    openBracketBytes      = []byte("[")
    closeBracketBytes     = []byte("]")
    percentBytes          = []byte("%")
    precisionBytes        = []byte(".")
    openAngleBytes        = []byte("<")
    closeAngleBytes       = []byte(">")
    openMapBytes          = []byte("map[")
    closeMapBytes         = []byte("]")
    lenEqualsBytes        = []byte("len=")
    capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"

// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
    if err := recover(); err != nil {
        w.Write(panicBytes)
        fmt.Fprintf(w, "%v", err)
        w.Write(closeParenBytes)
    }
}

// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputes the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
    // We need an interface to check if the type implements the error or
    // Stringer interface. However, the reflect package won't give us an
    // interface on certain things like unexported struct fields in order
    // to enforce visibility rules. We use unsafe, when it's available,
    // to bypass these restrictions since this package does not mutate the
    // values.
    if !v.CanInterface() {
        if UnsafeDisabled {
            return false
        }

        v = unsafeReflectValue(v)
    }

    // Choose whether or not to do error and Stringer interface lookups against
    // the base type or a pointer to the base type depending on settings.
    // Technically calling one of these methods with a pointer receiver can
    // mutate the value, however, types which choose to satisify an error or
    // Stringer interface with a pointer receiver should not be mutating their
    // state inside these interface methods.
    if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
        v = unsafeReflectValue(v)
    }
    if v.CanAddr() {
        v = v.Addr()
    }

    // Is it an error or Stringer?
    switch iface := v.Interface().(type) {
    case error:
        defer catchPanic(w, v)
        if cs.ContinueOnMethod {
            w.Write(openParenBytes)
            w.Write([]byte(iface.Error()))
            w.Write(closeParenBytes)
            w.Write(spaceBytes)
            return false
        }

        w.Write([]byte(iface.Error()))
        return true

    case fmt.Stringer:
        defer catchPanic(w, v)
        if cs.ContinueOnMethod {
            w.Write(openParenBytes)
            w.Write([]byte(iface.String()))
            w.Write(closeParenBytes)
            w.Write(spaceBytes)
            return false
        }
        w.Write([]byte(iface.String()))
        return true
    }
    return false
}

// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
    if val {
        w.Write(trueBytes)
    } else {
        w.Write(falseBytes)
    }
}

// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
    w.Write([]byte(strconv.FormatInt(val, base)))
}

// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
    w.Write([]byte(strconv.FormatUint(val, base)))
}

// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
    w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}

// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
    r := real(c)
    w.Write(openParenBytes)
    w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
    i := imag(c)
    if i >= 0 {
        w.Write(plusBytes)
    }
    w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
    w.Write(iBytes)
    w.Write(closeParenBytes)
}

// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
    // Null pointer.
    num := uint64(p)
    if num == 0 {
        w.Write(nilAngleBytes)
        return
    }

    // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
    buf := make([]byte, 18)

    // It's simpler to construct the hex string right to left.
    base := uint64(16)
    i := len(buf) - 1
    for num >= base {
        buf[i] = hexDigits[num%base]
        num /= base
        i--
    }
    buf[i] = hexDigits[num]

    // Add '0x' prefix.
    i--
    buf[i] = 'x'
    i--
    buf[i] = '0'

    // Strip unused leading bytes.
    buf = buf[i:]
    w.Write(buf)
}

// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
    values  []reflect.Value
    strings []string // either nil or same len and values
    cs      *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
    vs := &valuesSorter{values: values, cs: cs}
    if canSortSimply(vs.values[0].Kind()) {
        return vs
    }
    if !cs.DisableMethods {
        vs.strings = make([]string, len(values))
        for i := range vs.values {
            b := bytes.Buffer{}
            if !handleMethods(cs, &b, vs.values[i]) {
                vs.strings = nil
                break
            }
            vs.strings[i] = b.String()
        }
    }
    if vs.strings == nil && cs.SpewKeys {
        vs.strings = make([]string, len(values))
        for i := range vs.values {
            vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
        }
    }
    return vs
}

// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
    // This switch parallels valueSortLess, except for the default case.
    switch kind {
    case reflect.Bool:
        return true
    case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
        return true
    case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
        return true
    case reflect.Float32, reflect.Float64:
        return true
    case reflect.String:
        return true
    case reflect.Uintptr:
        return true
    case reflect.Array:
        return true
    }
    return false
}

// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
    return len(s.values)
}

// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
    s.values[i], s.values[j] = s.values[j], s.values[i]
    if s.strings != nil {
        s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
    }
}

// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
    switch a.Kind() {
    case reflect.Bool:
        return !a.Bool() && b.Bool()
    case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
        return a.Int() < b.Int()
    case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
        return a.Uint() < b.Uint()
    case reflect.Float32, reflect.Float64:
        return a.Float() < b.Float()
    case reflect.String:
        return a.String() < b.String()
    case reflect.Uintptr:
        return a.Uint() < b.Uint()
    case reflect.Array:
        // Compare the contents of both arrays.
        l := a.Len()
        for i := 0; i < l; i++ {
            av := a.Index(i)
            bv := b.Index(i)
            if av.Interface() == bv.Interface() {
                continue
            }
            return valueSortLess(av, bv)
        }
    }
    return a.String() < b.String()
}

// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
    if s.strings == nil {
        return valueSortLess(s.values[i], s.values[j])
    }
    return s.strings[i] < s.strings[j]
}

// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
    if len(values) == 0 {
        return
    }
    sort.Sort(newValuesSorter(values, cs))
}
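The sorting and method-handling helpers above exist so that spew can render map keys deterministically; the user-facing knobs live in ConfigState, defined in config.go below. As a rough orientation only, a sketch of how those options are typically toggled might look like the following (it assumes spew's top-level Dump helper and ConfigState.Dump from the rest of this vendored package, which are not shown in this excerpt):

package main

import "github.com/davecgh/go-spew/spew"

func main() {
    // SortKeys makes map output stable, which matters when diffing dumps in tests.
    cfg := spew.ConfigState{Indent: "  ", SortKeys: true}
    cfg.Dump(map[string]int{"b": 2, "a": 1})
}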
306 vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file
@@ -0,0 +1,306 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
    "bytes"
    "fmt"
    "io"
    "os"
)

// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
    // Indent specifies the string to use for each indentation level. The
    // global config instance that all top-level functions use set this to a
    // single space by default. If you would like more indentation, you might
    // set this to a tab with "\t" or perhaps two spaces with "  ".
    Indent string

    // MaxDepth controls the maximum number of levels to descend into nested
    // data structures. The default, 0, means there is no limit.
    //
    // NOTE: Circular data structures are properly detected, so it is not
    // necessary to set this value unless you specifically want to limit deeply
    // nested data structures.
    MaxDepth int

    // DisableMethods specifies whether or not error and Stringer interfaces are
    // invoked for types that implement them.
    DisableMethods bool

    // DisablePointerMethods specifies whether or not to check for and invoke
    // error and Stringer interfaces on types which only accept a pointer
    // receiver when the current type is not a pointer.
    //
    // NOTE: This might be an unsafe action since calling one of these methods
    // with a pointer receiver could technically mutate the value, however,
    // in practice, types which choose to satisify an error or Stringer
    // interface with a pointer receiver should not be mutating their state
    // inside these interface methods. As a result, this option relies on
    // access to the unsafe package, so it will not have any effect when
    // running in environments without access to the unsafe package such as
    // Google App Engine or with the "safe" build tag specified.
    DisablePointerMethods bool

    // DisablePointerAddresses specifies whether to disable the printing of
    // pointer addresses. This is useful when diffing data structures in tests.
    DisablePointerAddresses bool

    // DisableCapacities specifies whether to disable the printing of capacities
    // for arrays, slices, maps and channels. This is useful when diffing
    // data structures in tests.
    DisableCapacities bool

    // ContinueOnMethod specifies whether or not recursion should continue once
    // a custom error or Stringer interface is invoked. The default, false,
    // means it will print the results of invoking the custom error or Stringer
    // interface and return immediately instead of continuing to recurse into
    // the internals of the data type.
    //
    // NOTE: This flag does not have any effect if method invocation is disabled
    // via the DisableMethods or DisablePointerMethods options.
    ContinueOnMethod bool

    // SortKeys specifies map keys should be sorted before being printed. Use
    // this to have a more deterministic, diffable output. Note that only
    // native types (bool, int, uint, floats, uintptr and string) and types
    // that support the error or Stringer interfaces (if methods are
    // enabled) are supported, with other types sorted according to the
    // reflect.Value.String() output which guarantees display stability.
    SortKeys bool

    // SpewKeys specifies that, as a last resort attempt, map keys should
    // be spewed to strings and sorted by those strings. This is only
    // considered if SortKeys is true.
    SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
    return fmt.Errorf(format, c.convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
    return fmt.Fprint(w, c.convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
    return fmt.Fprintf(w, format, c.convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
    return fmt.Fprintln(w, c.convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
    return fmt.Print(c.convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Printf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Println(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||||
|
return fmt.Sprint(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||||
|
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||||
|
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||||
|
return fmt.Sprintln(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||||
|
interface. As a result, it integrates cleanly with standard fmt package
|
||||||
|
printing functions. The formatter is useful for inline printing of smaller data
|
||||||
|
types similar to the standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Typically this function shouldn't be called directly. It is much easier to make
|
||||||
|
use of the custom formatter by calling one of the convenience functions such as
|
||||||
|
c.Printf, c.Println, or c.Printf.
|
||||||
|
*/
|
||||||
|
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||||
|
return newFormatter(c, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||||
|
// exactly the same as Dump.
|
||||||
|
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||||
|
fdump(c, w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Dump displays the passed parameters to standard out with newlines, customizable
|
||||||
|
indentation, and additional debug information such as complete types and all
|
||||||
|
pointer addresses used to indirect to the final value. It provides the
|
||||||
|
following features over the built-in printing facilities provided by the fmt
|
||||||
|
package:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output
|
||||||
|
|
||||||
|
The configuration options are controlled by modifying the public members
|
||||||
|
of c. See ConfigState for options documentation.
|
||||||
|
|
||||||
|
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||||
|
get the formatted result as a string.
|
||||||
|
*/
|
||||||
|
func (c *ConfigState) Dump(a ...interface{}) {
|
||||||
|
fdump(c, os.Stdout, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(c, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||||
|
// length with each argument converted to a spew Formatter interface using
|
||||||
|
// the ConfigState associated with s.
|
||||||
|
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||||
|
formatters = make([]interface{}, len(args))
|
||||||
|
for index, arg := range args {
|
||||||
|
formatters[index] = newFormatter(c, arg)
|
||||||
|
}
|
||||||
|
return formatters
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||||
|
//
|
||||||
|
// Indent: " "
|
||||||
|
// MaxDepth: 0
|
||||||
|
// DisableMethods: false
|
||||||
|
// DisablePointerMethods: false
|
||||||
|
// ContinueOnMethod: false
|
||||||
|
// SortKeys: false
|
||||||
|
func NewDefaultConfig() *ConfigState {
|
||||||
|
return &ConfigState{Indent: " "}
|
||||||
|
}
|
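The ConfigState fields and methods above are how callers opt into deterministic output from this vendored copy of go-spew. As a minimal, illustrative sketch (not part of the vendored file or of this commit; the payload values are invented), a locally scoped ConfigState can be tuned for test-friendly diffs:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A local ConfigState keeps settings separate from the global spew.Config.
	// SortKeys plus the Disable* options make the output stable across runs,
	// which is what you want when diffing dumps in tests.
	cfg := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true,
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}

	payload := map[string]interface{}{
		"blockNumber": 12345,
		"hashes":      []string{"0xabc", "0xdef"},
	}

	// Sdump returns the formatted dump as a string instead of writing to stdout.
	fmt.Print(cfg.Sdump(payload))
}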
211 vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
@@ -0,0 +1,211 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.

A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output (only when using
	  Dump style)

There are two different approaches spew allows for dumping Go data structures:

	* Dump style which prints with newlines, customizable indentation,
	  and additional debug information such as types and all pointer addresses
	  used to indirect to the final value
	* A custom Formatter interface that integrates cleanly with the standard fmt
	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
	  similar to the default %v while providing the additional functionality
	  outlined above and passing unsupported format verbs such as %x and %q
	  along to fmt

Quick Start

This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.

To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
	spew.Dump(myVar1, myVar2, ...)
	spew.Fdump(someWriter, myVar1, myVar2, ...)
	str := spew.Sdump(myVar1, myVar2, ...)

Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

Configuration Options

Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.

It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.

The following configuration options are available:
	* Indent
		String to use for each indentation level for Dump functions.
		It is a single space by default. A popular alternative is "\t".

	* MaxDepth
		Maximum number of levels to descend into nested data structures.
		There is no limit by default.

	* DisableMethods
		Disables invocation of error and Stringer interface methods.
		Method invocation is enabled by default.

	* DisablePointerMethods
		Disables invocation of error and Stringer interface methods on types
		which only accept pointer receivers from non-pointer variables.
		Pointer method invocation is enabled by default.

	* DisablePointerAddresses
		DisablePointerAddresses specifies whether to disable the printing of
		pointer addresses. This is useful when diffing data structures in tests.

	* DisableCapacities
		DisableCapacities specifies whether to disable the printing of
		capacities for arrays, slices, maps and channels. This is useful when
		diffing data structures in tests.

	* ContinueOnMethod
		Enables recursion into types after invoking error and Stringer interface
		methods. Recursion after method invocation is disabled by default.

	* SortKeys
		Specifies map keys should be sorted before being printed. Use
		this to have a more deterministic, diffable output. Note that
		only native types (bool, int, uint, floats, uintptr and string)
		and types which implement error or Stringer interfaces are
		supported with other types sorted according to the
		reflect.Value.String() output which guarantees display
		stability. Natural map order is used by default.

	* SpewKeys
		Specifies that, as a last resort attempt, map keys should be
		spewed to strings and sorted by those strings. This is only
		considered if SortKeys is true.

Dump Usage

Simply call spew.Dump with a list of variables you want to dump:

	spew.Dump(myVar1, myVar2, ...)

You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:

	spew.Fdump(os.Stderr, myVar1, myVar2, ...)

A third option is to call spew.Sdump to get the formatted output as a string:

	str := spew.Sdump(myVar1, myVar2, ...)

Sample Dump Output

See the Dump example for details on the setup of the types and variables being
shown here.

	(main.Foo) {
	 unexportedField: (*main.Bar)(0xf84002e210)({
	  flag: (main.Flag) flagTwo,
	  data: (uintptr) <nil>
	 }),
	 ExportedField: (map[interface {}]interface {}) (len=1) {
	  (string) (len=3) "one": (bool) true
	 }
	}

Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
	([]uint8) (len=32 cap=32) {
	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
	 00000020  31 32                                             |12|
	}

Custom Formatter

Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Custom Formatter Usage

The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
functions have syntax you are most likely already familiar with:

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Println(myVar, myVar2)
	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

See the Index for the full list convenience functions.

Sample Formatter Output

Double pointer to a uint8:
	  %v: <**>5
	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
	 %#v: (**uint8)5
	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5

Pointer to circular struct with a uint8 field and a pointer to itself:
	  %v: <*>{1 <*><shown>}
	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}

See the Printf example for details on the setup of variables being shown
here.

Errors

Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
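To make the Quick Start above concrete, here is a short, hedged sketch of the two printing styles the package documentation describes; it is illustrative only, not part of the vendored package or this commit, and the header type and values are invented:

package main

import "github.com/davecgh/go-spew/spew"

type header struct {
	Number uint64
	Hash   string
}

func main() {
	h := &header{Number: 42, Hash: "0xabc"}

	// Dump style: multi-line output with indentation, types, and pointer addresses.
	spew.Dump(h)

	// Formatter style: inline output through the standard fmt verbs.
	spew.Printf("compact: %v  with types: %#v\n", h, h)
}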
509 vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
@@ -0,0 +1,509 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||||
|
// convert cgo types to uint8 slices for hexdumping.
|
||||||
|
uint8Type = reflect.TypeOf(uint8(0))
|
||||||
|
|
||||||
|
// cCharRE is a regular expression that matches a cgo char.
|
||||||
|
// It is used to detect character arrays to hexdump them.
|
||||||
|
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||||
|
|
||||||
|
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||||
|
// char. It is used to detect unsigned character arrays to hexdump
|
||||||
|
// them.
|
||||||
|
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||||
|
|
||||||
|
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||||
|
// It is used to detect uint8_t arrays to hexdump them.
|
||||||
|
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// dumpState contains information about the state of a dump operation.
|
||||||
|
type dumpState struct {
|
||||||
|
w io.Writer
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
ignoreNextIndent bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// indent performs indentation according to the depth level and cs.Indent
|
||||||
|
// option.
|
||||||
|
func (d *dumpState) indent() {
|
||||||
|
if d.ignoreNextIndent {
|
||||||
|
d.ignoreNextIndent = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||||
|
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||||
|
// Remove pointers at or below the current depth from map used to detect
|
||||||
|
// circular refs.
|
||||||
|
for k, depth := range d.pointers {
|
||||||
|
if depth >= d.depth {
|
||||||
|
delete(d.pointers, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep list of all dereferenced pointers to show later.
|
||||||
|
pointerChain := make([]uintptr, 0)
|
||||||
|
|
||||||
|
// Figure out how many levels of indirection there are by dereferencing
|
||||||
|
// pointers and unpacking interfaces down the chain while detecting circular
|
||||||
|
// references.
|
||||||
|
nilFound := false
|
||||||
|
cycleFound := false
|
||||||
|
indirects := 0
|
||||||
|
ve := v
|
||||||
|
for ve.Kind() == reflect.Ptr {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
indirects++
|
||||||
|
addr := ve.Pointer()
|
||||||
|
pointerChain = append(pointerChain, addr)
|
||||||
|
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||||
|
cycleFound = true
|
||||||
|
indirects--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d.pointers[addr] = d.depth
|
||||||
|
|
||||||
|
ve = ve.Elem()
|
||||||
|
if ve.Kind() == reflect.Interface {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ve = ve.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display type information.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||||
|
d.w.Write([]byte(ve.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
|
||||||
|
// Display pointer information.
|
||||||
|
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
for i, addr := range pointerChain {
|
||||||
|
if i > 0 {
|
||||||
|
d.w.Write(pointerChainBytes)
|
||||||
|
}
|
||||||
|
printHexPtr(d.w, addr)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display dereferenced value.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
switch {
|
||||||
|
case nilFound:
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
|
||||||
|
case cycleFound:
|
||||||
|
d.w.Write(circularBytes)
|
||||||
|
|
||||||
|
default:
|
||||||
|
d.ignoreNextType = true
|
||||||
|
d.dump(ve)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||||
|
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||||
|
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||||
|
// Determine whether this type should be hex dumped or not. Also,
|
||||||
|
// for types which should be hexdumped, try to use the underlying data
|
||||||
|
// first, then fall back to trying to convert them to a uint8 slice.
|
||||||
|
var buf []uint8
|
||||||
|
doConvert := false
|
||||||
|
doHexDump := false
|
||||||
|
numEntries := v.Len()
|
||||||
|
if numEntries > 0 {
|
||||||
|
vt := v.Index(0).Type()
|
||||||
|
vts := vt.String()
|
||||||
|
switch {
|
||||||
|
// C types that need to be converted.
|
||||||
|
case cCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUnsignedCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUint8tCharRE.MatchString(vts):
|
||||||
|
doConvert = true
|
||||||
|
|
||||||
|
// Try to use existing uint8 slices and fall back to converting
|
||||||
|
// and copying if that fails.
|
||||||
|
case vt.Kind() == reflect.Uint8:
|
||||||
|
// We need an addressable interface to convert the type
|
||||||
|
// to a byte slice. However, the reflect package won't
|
||||||
|
// give us an interface on certain things like
|
||||||
|
// unexported struct fields in order to enforce
|
||||||
|
// visibility rules. We use unsafe, when available, to
|
||||||
|
// bypass these restrictions since this package does not
|
||||||
|
// mutate the values.
|
||||||
|
vs := v
|
||||||
|
if !vs.CanInterface() || !vs.CanAddr() {
|
||||||
|
vs = unsafeReflectValue(vs)
|
||||||
|
}
|
||||||
|
if !UnsafeDisabled {
|
||||||
|
vs = vs.Slice(0, numEntries)
|
||||||
|
|
||||||
|
// Use the existing uint8 slice if it can be
|
||||||
|
// type asserted.
|
||||||
|
iface := vs.Interface()
|
||||||
|
if slice, ok := iface.([]uint8); ok {
|
||||||
|
buf = slice
|
||||||
|
doHexDump = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The underlying data needs to be converted if it can't
|
||||||
|
// be type asserted to a uint8 slice.
|
||||||
|
doConvert = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy and convert the underlying type if needed.
|
||||||
|
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||||
|
// Convert and copy each element into a uint8 byte
|
||||||
|
// slice.
|
||||||
|
buf = make([]uint8, numEntries)
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
vv := v.Index(i)
|
||||||
|
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||||
|
}
|
||||||
|
doHexDump = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hexdump the entire slice as needed.
|
||||||
|
if doHexDump {
|
||||||
|
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||||
|
str := indent + hex.Dump(buf)
|
||||||
|
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||||
|
str = strings.TrimRight(str, d.cs.Indent)
|
||||||
|
d.w.Write([]byte(str))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively call dump for each item.
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
d.dump(d.unpackValue(v.Index(i)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||||
|
// value to figure out what kind of object we are dealing with and formats it
|
||||||
|
// appropriately. It is a recursive function, however circular data structures
|
||||||
|
// are detected and handled properly.
|
||||||
|
func (d *dumpState) dump(v reflect.Value) {
|
||||||
|
// Handle invalid reflect values immediately.
|
||||||
|
kind := v.Kind()
|
||||||
|
if kind == reflect.Invalid {
|
||||||
|
d.w.Write(invalidAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle pointers specially.
|
||||||
|
if kind == reflect.Ptr {
|
||||||
|
d.indent()
|
||||||
|
d.dumpPtr(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print type information unless already handled elsewhere.
|
||||||
|
if !d.ignoreNextType {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write([]byte(v.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.ignoreNextType = false
|
||||||
|
|
||||||
|
// Display length and capacity if the built-in len and cap functions
|
||||||
|
// work with the value's kind and the len/cap itself is non-zero.
|
||||||
|
valueLen, valueCap := 0, 0
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||||
|
valueLen, valueCap = v.Len(), v.Cap()
|
||||||
|
case reflect.Map, reflect.String:
|
||||||
|
valueLen = v.Len()
|
||||||
|
}
|
||||||
|
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(lenEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueLen), 10)
|
||||||
|
}
|
||||||
|
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.w.Write(capEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueCap), 10)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||||
|
// is enabled
|
||||||
|
if !d.cs.DisableMethods {
|
||||||
|
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||||
|
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case reflect.Invalid:
|
||||||
|
// Do nothing. We should never get here since invalid has already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
printBool(d.w, v.Bool())
|
||||||
|
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
printInt(d.w, v.Int(), 10)
|
||||||
|
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
printUint(d.w, v.Uint(), 10)
|
||||||
|
|
||||||
|
case reflect.Float32:
|
||||||
|
printFloat(d.w, v.Float(), 32)
|
||||||
|
|
||||||
|
case reflect.Float64:
|
||||||
|
printFloat(d.w, v.Float(), 64)
|
||||||
|
|
||||||
|
case reflect.Complex64:
|
||||||
|
printComplex(d.w, v.Complex(), 32)
|
||||||
|
|
||||||
|
case reflect.Complex128:
|
||||||
|
printComplex(d.w, v.Complex(), 64)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.dumpSlice(v)
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// The only time we should get here is for nil interfaces due to
|
||||||
|
// unpackValue calls.
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// Do nothing. We should never get here since pointers have already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
// nil maps should be indicated as different than empty maps
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
numEntries := v.Len()
|
||||||
|
keys := v.MapKeys()
|
||||||
|
if d.cs.SortKeys {
|
||||||
|
sortValues(keys, d.cs)
|
||||||
|
}
|
||||||
|
for i, key := range keys {
|
||||||
|
d.dump(d.unpackValue(key))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
vt := v.Type()
|
||||||
|
numFields := v.NumField()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
d.indent()
|
||||||
|
vtf := vt.Field(i)
|
||||||
|
d.w.Write([]byte(vtf.Name))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.Field(i)))
|
||||||
|
if i < (numFields - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Uintptr:
|
||||||
|
printHexPtr(d.w, uintptr(v.Uint()))
|
||||||
|
|
||||||
|
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||||
|
printHexPtr(d.w, v.Pointer())
|
||||||
|
|
||||||
|
// There were not any other types at the time this code was written, but
|
||||||
|
// fall back to letting the default fmt package handle it in case any new
|
||||||
|
// types are added.
|
||||||
|
default:
|
||||||
|
if v.CanInterface() {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fdump is a helper function to consolidate the logic from the various public
|
||||||
|
// methods which take varying writers and config states.
|
||||||
|
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||||
|
for _, arg := range a {
|
||||||
|
if arg == nil {
|
||||||
|
w.Write(interfaceBytes)
|
||||||
|
w.Write(spaceBytes)
|
||||||
|
w.Write(nilAngleBytes)
|
||||||
|
w.Write(newlineBytes)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d := dumpState{w: w, cs: cs}
|
||||||
|
d.pointers = make(map[uintptr]int)
|
||||||
|
d.dump(reflect.ValueOf(arg))
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||||
|
// exactly the same as Dump.
|
||||||
|
func Fdump(w io.Writer, a ...interface{}) {
|
||||||
|
fdump(&Config, w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(&Config, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Dump displays the passed parameters to standard out with newlines, customizable
|
||||||
|
indentation, and additional debug information such as complete types and all
|
||||||
|
pointer addresses used to indirect to the final value. It provides the
|
||||||
|
following features over the built-in printing facilities provided by the fmt
|
||||||
|
package:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output
|
||||||
|
|
||||||
|
The configuration options are controlled by an exported package global,
|
||||||
|
spew.Config. See ConfigState for options documentation.
|
||||||
|
|
||||||
|
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||||
|
get the formatted result as a string.
|
||||||
|
*/
|
||||||
|
func Dump(a ...interface{}) {
|
||||||
|
fdump(&Config, os.Stdout, a...)
|
||||||
|
}
|
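dump.go above holds the pointer indirection, cycle detection, and hexdump handling that Dump relies on. A small sketch exercising both behaviours (assumptions: the node type and byte literal are invented for illustration; this code is not part of the vendored file):

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type node struct {
	Value int
	Next  *node
}

func main() {
	// A two-node cycle: Dump detects the already-shown pointer and stops
	// instead of recursing forever.
	a := &node{Value: 1}
	b := &node{Value: 2, Next: a}
	a.Next = b
	spew.Dump(a)

	// Byte slices are rendered hexdump -C style: offsets, hex bytes, ASCII.
	spew.Fdump(os.Stdout, []byte("vulcanizedb"))
}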
419 vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
@@ -0,0 +1,419 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||||
|
const supportedFlags = "0-+# "
|
||||||
|
|
||||||
|
// formatState implements the fmt.Formatter interface and contains information
|
||||||
|
// about the state of a formatting operation. The NewFormatter function can
|
||||||
|
// be used to get a new Formatter which can be used directly as arguments
|
||||||
|
// in standard fmt package printing calls.
|
||||||
|
type formatState struct {
|
||||||
|
value interface{}
|
||||||
|
fs fmt.State
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildDefaultFormat recreates the original format string without precision
|
||||||
|
// and width information to pass in to fmt.Sprintf in the case of an
|
||||||
|
// unrecognized type. Unless new types are added to the language, this
|
||||||
|
// function won't ever be called.
|
||||||
|
func (f *formatState) buildDefaultFormat() (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune('v')
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// constructOrigFormat recreates the original format string including precision
|
||||||
|
// and width information to pass along to the standard fmt package. This allows
|
||||||
|
// automatic deferral of all format strings this package doesn't support.
|
||||||
|
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if width, ok := f.fs.Width(); ok {
|
||||||
|
buf.WriteString(strconv.Itoa(width))
|
||||||
|
}
|
||||||
|
|
||||||
|
if precision, ok := f.fs.Precision(); ok {
|
||||||
|
buf.Write(precisionBytes)
|
||||||
|
buf.WriteString(strconv.Itoa(precision))
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune(verb)
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||||
|
// ensures that types for values which have been unpacked from an interface
|
||||||
|
// are displayed when the show types flag is also set.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface {
|
||||||
|
f.ignoreNextType = false
|
||||||
|
if !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||||
|
func (f *formatState) formatPtr(v reflect.Value) {
|
||||||
|
// Display nil if top level pointer is nil.
|
||||||
|
showTypes := f.fs.Flag('#')
|
||||||
|
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove pointers at or below the current depth from map used to detect
|
||||||
|
// circular refs.
|
||||||
|
for k, depth := range f.pointers {
|
||||||
|
if depth >= f.depth {
|
||||||
|
delete(f.pointers, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep list of all dereferenced pointers to possibly show later.
|
||||||
|
pointerChain := make([]uintptr, 0)
|
||||||
|
|
||||||
|
// Figure out how many levels of indirection there are by derferencing
|
||||||
|
// pointers and unpacking interfaces down the chain while detecting circular
|
||||||
|
// references.
|
||||||
|
nilFound := false
|
||||||
|
cycleFound := false
|
||||||
|
indirects := 0
|
||||||
|
ve := v
|
||||||
|
for ve.Kind() == reflect.Ptr {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
indirects++
|
||||||
|
addr := ve.Pointer()
|
||||||
|
pointerChain = append(pointerChain, addr)
|
||||||
|
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||||
|
cycleFound = true
|
||||||
|
indirects--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
f.pointers[addr] = f.depth
|
||||||
|
|
||||||
|
ve = ve.Elem()
|
||||||
|
if ve.Kind() == reflect.Interface {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ve = ve.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display type or indirection level depending on flags.
|
||||||
|
if showTypes && !f.ignoreNextType {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||||
|
f.fs.Write([]byte(ve.Type().String()))
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
} else {
|
||||||
|
if nilFound || cycleFound {
|
||||||
|
indirects += strings.Count(ve.Type().String(), "*")
|
||||||
|
}
|
||||||
|
f.fs.Write(openAngleBytes)
|
||||||
|
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||||
|
f.fs.Write(closeAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display pointer information depending on flags.
|
||||||
|
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
for i, addr := range pointerChain {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(pointerChainBytes)
|
||||||
|
}
|
||||||
|
printHexPtr(f.fs, addr)
|
||||||
|
}
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display dereferenced value.
|
||||||
|
switch {
|
||||||
|
case nilFound:
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
|
||||||
|
case cycleFound:
|
||||||
|
f.fs.Write(circularShortBytes)
|
||||||
|
|
||||||
|
default:
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(ve)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// format is the main workhorse for providing the Formatter interface. It
|
||||||
|
// uses the passed reflect value to figure out what kind of object we are
|
||||||
|
// dealing with and formats it appropriately. It is a recursive function,
|
||||||
|
// however circular data structures are detected and handled properly.
|
||||||
|
func (f *formatState) format(v reflect.Value) {
|
||||||
|
// Handle invalid reflect values immediately.
|
||||||
|
kind := v.Kind()
|
||||||
|
if kind == reflect.Invalid {
|
||||||
|
f.fs.Write(invalidAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle pointers specially.
|
||||||
|
if kind == reflect.Ptr {
|
||||||
|
f.formatPtr(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print type information unless already handled elsewhere.
|
||||||
|
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
f.fs.Write([]byte(v.Type().String()))
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = false
|
||||||
|
|
||||||
|
// Call Stringer/error interfaces if they exist and the handle methods
|
||||||
|
// flag is enabled.
|
||||||
|
if !f.cs.DisableMethods {
|
||||||
|
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||||
|
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case reflect.Invalid:
|
||||||
|
// Do nothing. We should never get here since invalid has already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
printBool(f.fs, v.Bool())
|
||||||
|
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
printInt(f.fs, v.Int(), 10)
|
||||||
|
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
printUint(f.fs, v.Uint(), 10)
|
||||||
|
|
||||||
|
case reflect.Float32:
|
||||||
|
printFloat(f.fs, v.Float(), 32)
|
||||||
|
|
||||||
|
case reflect.Float64:
|
||||||
|
printFloat(f.fs, v.Float(), 64)
|
||||||
|
|
||||||
|
case reflect.Complex64:
|
||||||
|
printComplex(f.fs, v.Complex(), 32)
|
||||||
|
|
||||||
|
case reflect.Complex128:
|
||||||
|
printComplex(f.fs, v.Complex(), 64)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
f.fs.Write(openBracketBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
numEntries := v.Len()
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(v.Index(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeBracketBytes)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
f.fs.Write([]byte(v.String()))
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// The only time we should get here is for nil interfaces due to
|
||||||
|
// unpackValue calls.
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// Do nothing. We should never get here since pointers have already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
// nil maps should be indicated as different than empty maps
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
f.fs.Write(openMapBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
keys := v.MapKeys()
|
||||||
|
if f.cs.SortKeys {
|
||||||
|
sortValues(keys, f.cs)
|
||||||
|
}
|
||||||
|
for i, key := range keys {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(key))
|
||||||
|
f.fs.Write(colonBytes)
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(v.MapIndex(key)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeMapBytes)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
numFields := v.NumField()
|
||||||
|
f.fs.Write(openBraceBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
vt := v.Type()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
vtf := vt.Field(i)
|
||||||
|
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||||
|
f.fs.Write([]byte(vtf.Name))
|
||||||
|
f.fs.Write(colonBytes)
|
||||||
|
}
|
||||||
|
f.format(f.unpackValue(v.Field(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Uintptr:
|
||||||
|
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||||
|
|
||||||
|
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||||
|
printHexPtr(f.fs, v.Pointer())
|
||||||
|
|
||||||
|
// There were not any other types at the time this code was written, but
|
||||||
|
// fall back to letting the default fmt package handle it if any get added.
|
||||||
|
default:
|
||||||
|
format := f.buildDefaultFormat()
|
||||||
|
if v.CanInterface() {
|
||||||
|
fmt.Fprintf(f.fs, format, v.Interface())
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(f.fs, format, v.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||||
|
// details.
|
||||||
|
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||||
|
f.fs = fs
|
||||||
|
|
||||||
|
// Use standard formatting for verbs that are not v.
|
||||||
|
if verb != 'v' {
|
||||||
|
format := f.constructOrigFormat(verb)
|
||||||
|
fmt.Fprintf(fs, format, f.value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.value == nil {
|
||||||
|
if fs.Flag('#') {
|
||||||
|
fs.Write(interfaceBytes)
|
||||||
|
}
|
||||||
|
fs.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f.format(reflect.ValueOf(f.value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFormatter is a helper function to consolidate the logic from the various
|
||||||
|
// public methods which take varying config states.
|
||||||
|
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||||
|
fs := &formatState{value: v, cs: cs}
|
||||||
|
fs.pointers = make(map[uintptr]int)
|
||||||
|
return fs
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||||
|
interface. As a result, it integrates cleanly with standard fmt package
|
||||||
|
printing functions. The formatter is useful for inline printing of smaller data
|
||||||
|
types similar to the standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Typically this function shouldn't be called directly. It is much easier to make
|
||||||
|
use of the custom formatter by calling one of the convenience functions such as
|
||||||
|
Printf, Println, or Fprintf.
|
||||||
|
*/
|
||||||
|
func NewFormatter(v interface{}) fmt.Formatter {
|
||||||
|
return newFormatter(&Config, v)
|
||||||
|
}
|
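format.go implements the fmt.Formatter returned by NewFormatter, handling the %v verb family itself and deferring other verbs, width, and precision to the standard fmt package. A hedged sketch of using the formatter directly instead of the Printf wrappers (the config struct is invented for the example and is not part of the vendored code):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := struct {
		Name    string
		Retries int
	}{Name: "statediff", Retries: 3}

	// Wrapping a value once lets any standard fmt call render it with spew's %v handling.
	f := spew.NewFormatter(cfg)
	fmt.Printf("%v\n", f)  // compact spew output
	fmt.Printf("%+v\n", f) // adds pointer addresses where relevant
	fmt.Printf("%#v\n", f) // adds type information

	// Verbs spew does not handle, such as %q, fall through to the standard library.
	fmt.Printf("%q\n", spew.NewFormatter("hello"))
}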
148 vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}
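For completeness (again a hypothetical caller-side sketch, not part of the vendored file), the convenience wrappers above keep the familiar fmt call shape while convertArgs wraps every argument in a spew Formatter:

// Usage sketch (hypothetical caller code, not part of this commit).
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	cfg := map[string]int{"retries": 3, "timeoutSec": 30}
	// Equivalent to fmt.Printf with each argument wrapped by NewFormatter.
	spew.Printf("config: %+v\n", cfg)
	// Sprintf and Println behave the same way, returning or printing the result.
	line := spew.Sprintf("config: %#v", cfg)
	spew.Println(line)
}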

19  vendor/github.com/davidlazar/go-crypto/LICENSE  generated vendored Normal file
@@ -0,0 +1,19 @@
Copyright (c) 2016 David Lazar <lazard@mit.edu>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

19  vendor/github.com/davidlazar/go-crypto/drbg/LICENSE  generated vendored Normal file
@@ -0,0 +1,19 @@
Copyright (c) 2013 David Lazar <lazard@mit.edu>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

85  vendor/github.com/davidlazar/go-crypto/salsa20/salsa20.go  generated vendored Normal file
@@ -0,0 +1,85 @@
package salsa20

import (
	"crypto/cipher"
	"encoding/binary"

	"golang.org/x/crypto/salsa20/salsa"
)

const BlockSize = 64

type salsaCipher struct {
	key     *[32]byte
	nonce   [8]byte
	x       [BlockSize]byte
	nx      int
	counter uint64
}

func New(key *[32]byte, nonce []byte) cipher.Stream {
	c := new(salsaCipher)

	if len(nonce) == 24 {
		var subKey [32]byte
		var hNonce [16]byte
		copy(hNonce[:], nonce[:16])
		salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
		copy(c.nonce[:], nonce[16:])
		c.key = &subKey
	} else if len(nonce) == 8 {
		c.key = key
		copy(c.nonce[:], nonce)
	} else {
		panic("salsa20: nonce must be 8 or 24 bytes")
	}
	return c
}

func (c *salsaCipher) XORKeyStream(dst, src []byte) {
	if len(dst) < len(src) {
		src = src[:len(dst)]
	}
	if c.nx > 0 {
		n := xorBytes(dst, src, c.x[c.nx:])
		c.nx += n
		if c.nx == BlockSize {
			c.nx = 0
		}
		src = src[n:]
		dst = dst[n:]
	}
	if len(src) > BlockSize {
		n := len(src) &^ (BlockSize - 1)
		c.blocks(dst, src[:n])
		src = src[n:]
		dst = dst[n:]
	}
	if len(src) > 0 {
		c.nx = copy(c.x[:], src)
		for i := c.nx; i < len(c.x); i++ {
			c.x[i] = 0
		}
		c.blocks(c.x[:], c.x[:])
		copy(dst, c.x[:c.nx])
	}
}

func (c *salsaCipher) blocks(dst, src []byte) {
	var nonce [16]byte
	copy(nonce[:], c.nonce[:])
	binary.LittleEndian.PutUint64(nonce[8:], c.counter)
	salsa.XORKeyStream(dst, src, &nonce, c.key)
	c.counter += uint64(len(src)) / 64
}

func xorBytes(dst, a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		dst[i] = a[i] ^ b[i]
	}
	return n
}
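A minimal caller-side sketch of the New/XORKeyStream API defined above (hypothetical code, not part of the vendored file; the key and message are placeholders), showing the 24-byte nonce path that derives a subkey via HSalsa20:

// Usage sketch (hypothetical caller code, not part of this commit).
package main

import (
	"fmt"

	"github.com/davidlazar/go-crypto/salsa20"
)

func main() {
	var key [32]byte          // placeholder secret key
	nonce := make([]byte, 24) // 24 bytes selects the subkey (XSalsa20-style) path in New

	plaintext := []byte("hello, salsa20")
	ciphertext := make([]byte, len(plaintext))

	stream := salsa20.New(&key, nonce)
	stream.XORKeyStream(ciphertext, plaintext)
	fmt.Printf("%x\n", ciphertext)
}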

1  vendor/github.com/dgraph-io/badger/.gitignore  generated vendored Normal file
@@ -0,0 +1 @@
p/

20  vendor/github.com/dgraph-io/badger/.golangci.yml  generated vendored Normal file
@@ -0,0 +1,20 @@
run:
  tests: false

linters-settings:
  lll:
    line-length: 100

linters:
  disable-all: true
  enable:
    - errcheck
    - ineffassign
    - gas
    - gofmt
    - golint
    - gosimple
    - govet
    - lll
    - varcheck
    - unused

25  vendor/github.com/dgraph-io/badger/.travis.yml  generated vendored Normal file
@@ -0,0 +1,25 @@
language: go

go:
  - "1.9"
  - "1.10"
  - "1.11"

matrix:
  include:
    - os: osx
notifications:
  email: false
  slack:
    secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk=

env:
  global:
    - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8=

before_script:
  - go get github.com/mattn/goveralls
script:
  - bash contrib/cover.sh $HOME/build coverage.out || travis_terminate 1
  - goveralls -service=travis-ci -coverprofile=coverage.out || true
  - goveralls -coverprofile=coverage.out -service=travis-ci

100  vendor/github.com/dgraph-io/badger/CHANGELOG.md  generated vendored Normal file
@@ -0,0 +1,100 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.5.3] - 2018-07-11
Bug Fixes:
* Fix a panic caused due to item.vptr not copying over vs.Value, when looking
  for a move key.

## [1.5.2] - 2018-06-19
Bug Fixes:
* Fix the way move key gets generated.
* If a transaction has unclosed, or multiple iterators running simultaneously,
  throw a panic. Every iterator must be properly closed. At any point in time,
  only one iterator per transaction can be running. This is to avoid bugs in a
  transaction data structure which is thread unsafe.

* *Warning: This change might cause panics in user code. Fix is to properly
  close your iterators, and only have one running at a time per transaction.*

## [1.5.1] - 2018-06-04
Bug Fixes:
* Fix for infinite yieldItemValue recursion. #503
* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f
* Use file size based window size for sampling, instead of fixing it to 10MB. #501

Cleanup:
* Clarify comments and documentation.
* Move badger tool one directory level up.

## [1.5.0] - 2018-05-08
* Introduce `NumVersionsToKeep` option. This option is used to discard many
  versions of the same key, which saves space.
* Add a new `SetWithDiscard` method, which would indicate that all the older
  versions of the key are now invalid. Those versions would be discarded during
  compactions.
* Value log GC moves are now bound to another keyspace to ensure latest versions
  of data are always at the top in LSM tree.
* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per
  value log file. This helps bound the time it takes to garbage collect one
  file.

## [1.4.0] - 2018-05-04
* Make mmap-ing of value log optional.
* Run GC multiple times, based on recorded discard statistics.
* Add MergeOperator.
* Force compact L0 on close (#439).
* Add truncate option to warn about data loss (#452).
* Discard key versions during compaction (#464).
* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB.

Bug fix:
* (Temporary) Check max version across all tables in Get (removed in next
  release).
* Update commit and read ts while loading from backup.
* Ensure all transaction entries are part of the same value log file.
* On commit, run unlock callbacks before doing writes (#413).
* Wait for goroutines to finish before closing iterators (#421).

## [1.3.0] - 2017-12-12
* Add `DB.NextSequence()` method to generate monotonically increasing integer
  sequences.
* Add `DB.Size()` method to return the size of LSM and value log files.
* Tweaked mmap code to make Windows 32-bit builds work.
* Tweaked build tags on some files to make iOS builds work.
* Fix `DB.PurgeOlderVersions()` to not violate some constraints.

## [1.2.0] - 2017-11-30
* Expose a `Txn.SetEntry()` method to allow setting the key-value pair
  and all the metadata at the same time.

## [1.1.1] - 2017-11-28
* Fix bug where txn.Get was returning a key deleted in the same transaction.
* Fix race condition while decrementing reference in oracle.
* Update doneCommit in the callback for CommitAsync.
* Iterator sees writes of current txn.

## [1.1.0] - 2017-11-13
* Create Badger directory if it does not exist when `badger.Open` is called.
* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations
* Fixed 64-bit alignment issues to make Badger run on Arm v7

## [1.0.1] - 2017-11-06
* Fix an uint16 overflow when resizing key slice

[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.5.3...HEAD
[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3
[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2
[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1
[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0
[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0
[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0
[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0
[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1
[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0
[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1
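The changelog above references the Badger v1.x API (Open, Txn.Set, Item.ValueCopy). A hedged usage sketch of that API follows; it is caller-side code, not part of this commit, and the directory path, key, and value are placeholders:

// Usage sketch (hypothetical caller code, not part of this commit).
package main

import (
	"log"

	"github.com/dgraph-io/badger"
)

func main() {
	// In badger v1.x, DefaultOptions is a struct value; Dir and ValueDir
	// must be set before calling Open.
	opts := badger.DefaultOptions
	opts.Dir = "/tmp/badger-example"
	opts.ValueDir = "/tmp/badger-example"

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a key inside a read-write transaction.
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read it back; ValueCopy (added in 1.1.0 per the changelog) returns
	// a copy that remains valid outside the transaction.
	if err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("answer"))
		if err != nil {
			return err
		}
		val, err := item.ValueCopy(nil)
		if err != nil {
			return err
		}
		log.Printf("answer = %s", val)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}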

Some files were not shown because too many files have changed in this diff.