Replace dep with go mod

Rob Mulholand 2019-07-17 13:22:14 -05:00
parent 987abd4b2e
commit f51f5c64a0
5907 changed files with 293 additions and 2079166 deletions
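For context, a dep-to-modules migration along the lines of this commit typically looks like the sketch below (the exact commands aren't recorded in the commit; the module path comes from the new go.mod). `go mod init` can seed `go.mod` from an existing `Gopkg.lock`, and `go mod tidy` then resolves the module graph and writes `go.sum`:

```bash
go mod init github.com/vulcanize/vulcanizedb   # seeds go.mod from Gopkg.lock where possible
go mod tidy                                    # resolves dependencies and writes go.sum
go build ./...                                 # sanity-check the build without `dep ensure`
git rm -r vendor/ Gopkg.lock Gopkg.toml        # drop the vendored tree and dep metadata
```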

Gopkg.lock generated

@@ -1,566 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/allegro/bigcache"
packages = [
".",
"queue"
]
revision = "69ea0af04088faa57adb9ac683934277141e92a5"
version = "v2.0.0"
[[projects]]
branch = "master"
name = "github.com/aristanetworks/goarista"
packages = ["monotime"]
revision = "ed1100a1c0154be237da0078e86b19c523c8c661"
[[projects]]
branch = "master"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
revision = "c26ffa870fd817666a857af1bf6498fabba1ffe3"
[[projects]]
name = "github.com/dave/jennifer"
packages = ["jen"]
revision = "14e399b6b5e8456c66c45c955fc27b568bacb5c9"
version = "v1.3.0"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
name = "github.com/deckarep/golang-set"
packages = ["."]
revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e"
version = "v1.7.1"
[[projects]]
name = "github.com/edsrzf/mmap-go"
packages = ["."]
revision = "188cc3b666ba704534fa4f96e9e61f21f1e1ba7c"
version = "v1.0.0"
[[projects]]
name = "github.com/elastic/gosigar"
packages = [
".",
"sys/windows"
]
revision = "f75810decf6f4d88b130bfc4d2ba7ccdcea0c01d"
version = "v0.10.4"
[[projects]]
name = "github.com/ethereum/go-ethereum"
packages = [
".",
"accounts",
"accounts/abi",
"accounts/abi/bind",
"accounts/external",
"accounts/keystore",
"accounts/scwallet",
"accounts/usbwallet",
"accounts/usbwallet/trezor",
"common",
"common/bitutil",
"common/hexutil",
"common/math",
"common/mclock",
"common/prque",
"consensus",
"consensus/clique",
"consensus/ethash",
"consensus/misc",
"core",
"core/bloombits",
"core/rawdb",
"core/state",
"core/types",
"core/vm",
"crypto",
"crypto/bn256",
"crypto/bn256/cloudflare",
"crypto/bn256/google",
"crypto/ecies",
"crypto/secp256k1",
"eth/downloader",
"ethclient",
"ethdb",
"ethdb/leveldb",
"ethdb/memorydb",
"event",
"internal/ethapi",
"log",
"metrics",
"p2p",
"p2p/discover",
"p2p/discv5",
"p2p/enode",
"p2p/enr",
"p2p/nat",
"p2p/netutil",
"params",
"rlp",
"rpc",
"signer/core",
"signer/storage",
"trie"
]
revision = "52f2461774bcb8cdd310f86b4bc501df5b783852"
version = "v1.9.0"
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
branch = "master"
name = "github.com/gballet/go-libpcsclite"
packages = ["."]
revision = "2772fd86a8ff4306d2749f610a386bfee9e0d727"
[[projects]]
name = "github.com/go-stack/stack"
packages = ["."]
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
version = "v1.8.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = [
"proto",
"protoc-gen-go/descriptor"
]
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
version = "v1.3.2"
[[projects]]
name = "github.com/golang/snappy"
packages = ["."]
revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
version = "v0.0.1"
[[projects]]
name = "github.com/google/uuid"
packages = ["."]
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
version = "v1.1.1"
[[projects]]
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
]
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
version = "v0.5.1"
[[projects]]
name = "github.com/hashicorp/hcl"
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/printer",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token"
]
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]]
name = "github.com/hpcloud/tail"
packages = [
".",
"ratelimiter",
"util",
"watch",
"winfile"
]
revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
version = "v1.0.0"
[[projects]]
name = "github.com/huin/goupnp"
packages = [
".",
"dcps/internetgateway1",
"dcps/internetgateway2",
"httpu",
"scpd",
"soap",
"ssdp"
]
revision = "656e61dfadd241c7cbdd22a023fa81ecb6860ea8"
version = "v1.0.0"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
name = "github.com/jackpal/go-nat-pmp"
packages = ["."]
revision = "c9cfead9f2a36ddf3daa40ba269aa7f4bbba6b62"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/jmoiron/sqlx"
packages = [
".",
"reflectx"
]
revision = "38398a30ed8516ffda617a04c822de09df8a3ec5"
[[projects]]
branch = "master"
name = "github.com/karalabe/usb"
packages = ["."]
revision = "9be757f914c0907b7ddd561ea86eec15313ac022"
[[projects]]
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
version = "v1.0.2"
[[projects]]
name = "github.com/lib/pq"
packages = [
".",
"oid",
"scram"
]
revision = "3427c32cb71afc948325f299f040e53c1dd78979"
version = "v1.2.0"
[[projects]]
name = "github.com/magiconair/properties"
packages = ["."]
revision = "de8848e004dd33dc07a2947b3d76f618a7fc7ef1"
version = "v1.8.1"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "3ee7d812e62a0804a7d0a324e0249ca2db3476d3"
version = "v0.0.4"
[[projects]]
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"
[[projects]]
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]]
name = "github.com/olekukonko/tablewriter"
packages = ["."]
revision = "e6d60cf7ba1f42d86d54cdf5508611c4aafb3970"
version = "v0.0.1"
[[projects]]
name = "github.com/onsi/ginkgo"
packages = [
".",
"config",
"internal/codelocation",
"internal/containernode",
"internal/failer",
"internal/leafnodes",
"internal/remote",
"internal/spec",
"internal/spec_iterator",
"internal/specrunner",
"internal/suite",
"internal/testingtproxy",
"internal/writer",
"reporters",
"reporters/stenographer",
"reporters/stenographer/support/go-colorable",
"reporters/stenographer/support/go-isatty",
"types"
]
revision = "eea6ad008b96acdaa524f5b409513bf062b500ad"
version = "v1.8.0"
[[projects]]
name = "github.com/onsi/gomega"
packages = [
".",
"format",
"ghttp",
"internal/assertion",
"internal/asyncassertion",
"internal/oraclematcher",
"internal/testingtsupport",
"matchers",
"matchers/support/goraph/bipartitegraph",
"matchers/support/goraph/edge",
"matchers/support/goraph/node",
"matchers/support/goraph/util",
"types"
]
revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2"
version = "v1.5.0"
[[projects]]
name = "github.com/pborman/uuid"
packages = ["."]
revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
version = "v1.2"
[[projects]]
name = "github.com/pelletier/go-toml"
packages = ["."]
revision = "728039f679cbcd4f6a54e080d2219a4c4928c546"
version = "v1.4.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
name = "github.com/pressly/goose"
packages = ["."]
revision = "e4b98955473e91a12fc7d8816c28d06376d1d92c"
version = "v2.6.0"
[[projects]]
name = "github.com/prometheus/tsdb"
packages = ["fileutil"]
revision = "d230c67aa180850b80ae49e07079f55df1da0502"
version = "v0.9.1"
[[projects]]
name = "github.com/rjeczalik/notify"
packages = ["."]
revision = "69d839f37b13a8cb7a78366f7633a4071cb43be7"
version = "v0.9.2"
[[projects]]
name = "github.com/rs/cors"
packages = ["."]
revision = "9a47f48565a795472d43519dd49aac781f3034fb"
version = "v1.6.0"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "839c75faf7f98a33d445d181f3018b5c3409a45e"
version = "v1.4.2"
[[projects]]
name = "github.com/spf13/afero"
packages = [
".",
"mem"
]
revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424"
version = "v1.2.2"
[[projects]]
name = "github.com/spf13/cast"
packages = ["."]
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
version = "v1.3.0"
[[projects]]
name = "github.com/spf13/cobra"
packages = ["."]
revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5"
version = "v0.0.5"
[[projects]]
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1"
version = "v1.1.0"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
name = "github.com/spf13/viper"
packages = ["."]
revision = "b5bf975e5823809fb22c7644d008757f78a4259e"
version = "v1.4.0"
[[projects]]
branch = "develop"
name = "github.com/status-im/keycard-go"
packages = ["derivationpath"]
revision = "d95853db0f480b9d6379009500acf44b21dc0be6"
[[projects]]
name = "github.com/steakknife/bloomfilter"
packages = ["."]
revision = "99ee86d9200fcc2ffde62f508329bd6627c0a307"
version = "1.0.4"
[[projects]]
name = "github.com/steakknife/hamming"
packages = ["."]
revision = "003c143a81c25ea5e263d692919c611c7122ae6b"
version = "0.2.5"
[[projects]]
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
"leveldb/cache",
"leveldb/comparer",
"leveldb/errors",
"leveldb/filter",
"leveldb/iterator",
"leveldb/journal",
"leveldb/memdb",
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util"
]
revision = "9d007e481048296f09f59bd19bb7ae584563cd95"
version = "v1.0.0"
[[projects]]
name = "github.com/tyler-smith/go-bip39"
packages = [
".",
"wordlists"
]
revision = "2af0a847066a4f2669040ccd44a79c8eca10806a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/wsddn/go-ecdh"
packages = ["."]
revision = "48726bab92085232373de4ec5c51ce7b441c63a0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"curve25519",
"pbkdf2",
"ripemd160",
"scrypt",
"sha3",
"ssh/terminal"
]
revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"html",
"html/atom",
"html/charset",
"websocket"
]
revision = "da137c7871d730100384dbcf36e6f8fa493aef5b"
[[projects]]
branch = "master"
name = "golang.org/x/sync"
packages = ["errgroup"]
revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
"windows"
]
revision = "fae7ac547cb717d141c433a2a173315e216b64c4"
[[projects]]
name = "golang.org/x/text"
packages = [
"encoding",
"encoding/charmap",
"encoding/htmlindex",
"encoding/internal",
"encoding/internal/identifier",
"encoding/japanese",
"encoding/korean",
"encoding/simplifiedchinese",
"encoding/traditionalchinese",
"encoding/unicode",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
"internal/utf8internal",
"language",
"runes",
"transform",
"unicode/cldr",
"unicode/norm"
]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
source = "gopkg.in/fsnotify/fsnotify.v1"
version = "v1.4.7"
[[projects]]
branch = "v2"
name = "gopkg.in/natefinch/npipe.v2"
packages = ["."]
revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6"
[[projects]]
branch = "v1"
name = "gopkg.in/tomb.v1"
packages = ["."]
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "7c2260ca67851d579e74e84e128e6934b3a8bb4bb4f450a5ab9a42f31c20e91d"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -1,58 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[override]]
name = "gopkg.in/fsnotify.v1"
source = "gopkg.in/fsnotify/fsnotify.v1"
[[override]]
name = "github.com/pressly/sup"
version = "0.5.3"
[[override]]
name = "github.com/golang/protobuf"
version = "1.3.2"
[[constraint]]
name = "github.com/onsi/ginkgo"
version = "1.4.0"
[[constraint]]
branch = "master"
name = "github.com/jmoiron/sqlx"
[[constraint]]
branch = "master"
name = "github.com/lib/pq"
[[constraint]]
name = "github.com/sirupsen/logrus"
version = "1.2.0"
[[constraint]]
name = "github.com/spf13/cobra"
version = "0.0.1"
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "1.9.0"

Makefile

@@ -3,11 +3,6 @@ BASE = $(GOPATH)/src/$(PACKAGE)
 PKGS = go list ./... | grep -v "^vendor/"
 # Tools
 ## Dependency management
-DEP = $(BIN)/dep
-$(BIN)/dep:
-	go get -u github.com/golang/dep/cmd/dep
 ## Testing library
 GINKGO = $(BIN)/ginkgo
 $(BIN)/ginkgo:
@@ -32,7 +27,7 @@ $(BIN)/gometalinter.v2:
 .PHONY: installtools
-installtools: | $(LINT) $(GOOSE) $(GINKGO) $(DEP)
+installtools: | $(LINT) $(GOOSE) $(GINKGO)
 	echo "Installing tools"
 .PHONY: metalint
@@ -58,11 +53,7 @@ integrationtest: | $(GINKGO) $(LINT)
 	go fmt ./...
 	$(GINKGO) -r integration_test/
-.PHONY: dep
-dep: | $(DEP)
-	$(DEP) ensure
-build: dep
+build:
 	go fmt ./...
 	go build

go.mod Normal file

@@ -0,0 +1,65 @@
module github.com/vulcanize/vulcanizedb
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/allegro/bigcache v1.1.0 // indirect
github.com/apilayer/freegeoip v3.5.0+incompatible // indirect
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76 // indirect
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc // indirect
github.com/cespare/cp v1.1.1 // indirect
github.com/dave/jennifer v1.3.0
github.com/deckarep/golang-set v1.7.1 // indirect
github.com/docker/docker v1.13.1 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/elastic/gosigar v0.10.4 // indirect
github.com/ethereum/go-ethereum v1.9.0
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-sql-driver/mysql v1.4.1 // indirect
github.com/golang/protobuf v1.3.2 // indirect
github.com/google/uuid v1.0.0 // indirect
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598 // indirect
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47
github.com/howeyc/fsnotify v0.9.0 // indirect
github.com/hpcloud/tail v1.0.0
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/influxdata/influxdb v1.7.7 // indirect
github.com/jackpal/go-nat-pmp v1.0.1 // indirect
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 // indirect
github.com/lib/pq v1.0.0
github.com/mattn/go-colorable v0.1.2 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/mattn/go-sqlite3 v1.11.0 // indirect
github.com/mitchellh/go-homedir v1.0.0
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/olekukonko/tablewriter v0.0.1 // indirect
github.com/onsi/ginkgo v1.6.0
github.com/onsi/gomega v1.4.2
github.com/oschwald/maxminddb-golang v1.3.1 // indirect
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/pressly/goose v2.6.0+incompatible
github.com/prometheus/tsdb v0.9.1 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.5.0 // indirect
github.com/sirupsen/logrus v1.2.0
github.com/spf13/cobra v0.0.3
github.com/spf13/viper v1.2.0
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f // indirect
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 // indirect
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d // indirect
github.com/tyler-smith/go-bip39 v1.0.0 // indirect
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 // indirect
golang.org/x/net v0.0.0-20190603091049-60506f45cf65
golang.org/x/sync v0.0.0-20190423024810-112230192c58
google.golang.org/appengine v1.6.1 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
)

go.sum Normal file

@@ -0,0 +1,226 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.1.0 h1:MLuIKTjdxDc+qsG2rhjsYjsHQC5LUGjIWzutg7M+W68=
github.com/allegro/bigcache v1.1.0/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/apilayer/freegeoip v3.5.0+incompatible h1:z1u2gv0/rsSi/HqMDB436AiUROXXim7st5DOg4Ikl4A=
github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU=
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76 h1:64W/KrGykPTfDI9xTkZtnjZRYA5p2+c/IuGgjzeWCpI=
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc h1:nLRj+ULuRYb0qTAOnuayFXRnLjYXBots5CSp5zH4RqU=
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/dave/jennifer v1.3.0 h1:p3tl41zjjCZTNBytMwrUuiAnherNUZktlhPTKoF/sEk=
github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.10.4 h1:6jfw75dsoflhBMRdO6QPzQUgLqUYTsQQQRkkcsHsuPo=
github.com/elastic/gosigar v0.10.4/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
github.com/ethereum/go-ethereum v1.9.0 h1:9Kaf7UfDkV3aIUJlf14hI/GgEgRAUq60u4fBlb9dLWw=
github.com/ethereum/go-ethereum v1.9.0/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598 h1:XLoCW/kXxbvPvp216Kq/c+TtwWYHy9sjeDidFcG45g0=
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598/go.mod h1:Au3iQ8DvDis8hZ4q2OzRcaKYlAsPt+fYvib5q4nIqu4=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/howeyc/fsnotify v0.9.0 h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=
github.com/howeyc/fsnotify v0.9.0/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324 h1:PV190X5/DzQ/tbFFG5YpT5mH6q+cHlfgqI5JuRnH9oE=
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v1.7.7 h1:UvNzAPfBrKMENVbQ4mr4ccA9sW+W1Ihl0Yh1s0BiVAg=
github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0 h1:5B0uxl2lzNRVkJVg+uGHxWtRt4C0Wjc6kJKo5XYx8xE=
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 h1:S8kWZLXHpcOq3nGAvIs0oDgd4CXxkxE3hkDVRjTu7ro=
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.0.0 h1:vVpGvMXJPqSDh2VYHF7gsfQj8Ncx+Xw5Y1KHeTRY+7I=
github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/oschwald/maxminddb-golang v1.3.1 h1:kPc5+ieL5CC/Zn0IaXJPxDFlUxKTQEU8QBTtmfQDAIo=
github.com/oschwald/maxminddb-golang v1.3.1/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 h1:zNBQb37RGLmJybyMcs983HfUfpkw9OTFD9tbBfAViHE=
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose v2.6.0+incompatible h1:3f8zIQ8rfgP9tyI0Hmcs2YNAqUCL1c+diLe3iU8Qd/k=
github.com/pressly/goose v2.6.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.9.1 h1:IWaAmWkYlgG7/S4iw4IpAQt5Y35QaZM6/GsZ7GsjAuk=
github.com/prometheus/tsdb v0.9.1/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rs/cors v1.5.0 h1:dgSHE6+ia18arGOTIYQKKGWLvEbGvmbNE6NfxhoNHUY=
github.com/rs/cors v1.5.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.2.0 h1:M4Rzxlu+RgU4pyBRKhKaVN1VeYOm8h2jgyXnAseDgCc=
github.com/spf13/viper v1.2.0/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f h1:T7YHzO3/eqD/kv5m9+TLM4XuEAkN7NPj5pnZHqaOo/Q=
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 h1:o6NMd68tuqfQ0ZFnz2d16xzFNLWxrCvqF40InOJJHSM=
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc=
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/tyler-smith/go-bip39 v1.0.0 h1:FOHg9gaQLeBBRbHE/QrTLfEiBHy5pQ/yXzf9JG5pYFM=
github.com/tyler-smith/go-bip39 v1.0.0/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c h1:+EXw7AwNOKzPFXMZ1yNjO40aWCh3PIquJB2fYlv9wcs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff h1:uuol9OUzSvZntY1v963NAbVd7A+PHLMz1FlCe3Lorcs=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

vendor/github.com/allegro/bigcache/.gitignore

@@ -1,10 +0,0 @@
.idea
.DS_Store
/server/server.exe
/server/server
/server/server_dar*
/server/server_fre*
/server/server_win*
/server/server_net*
/server/server_ope*
CHANGELOG.md

vendor/github.com/allegro/bigcache/.travis.yml

@@ -1,31 +0,0 @@
language: go
go:
- 1.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
before_install:
- go get github.com/modocache/gover
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
- go get golang.org/x/lint/golint
- go get github.com/stretchr/testify/assert
- go get github.com/gordonklaus/ineffassign
script:
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
- diff <(echo -n) <(gofmt -s -d .)
- golint ./... # This won't break the build, just show warnings
- ineffassign .
- go vet ./...
- go test -race -count=1 -coverprofile=queue.coverprofile ./queue
- go test -race -count=1 -coverprofile=server.coverprofile ./server
- go test -race -count=1 -coverprofile=main.coverprofile
- $HOME/gopath/bin/gover
- $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci

vendor/github.com/allegro/bigcache/LICENSE

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/allegro/bigcache/README.md

@@ -1,150 +0,0 @@
# BigCache [![Build Status](https://travis-ci.org/allegro/bigcache.svg?branch=master)](https://travis-ci.org/allegro/bigcache)&nbsp;[![Coverage Status](https://coveralls.io/repos/github/allegro/bigcache/badge.svg?branch=master)](https://coveralls.io/github/allegro/bigcache?branch=master)&nbsp;[![GoDoc](https://godoc.org/github.com/allegro/bigcache?status.svg)](https://godoc.org/github.com/allegro/bigcache)&nbsp;[![Go Report Card](https://goreportcard.com/badge/github.com/allegro/bigcache)](https://goreportcard.com/report/github.com/allegro/bigcache)
Fast, concurrent, evicting in-memory cache written to keep a big number of entries without impacting performance.
BigCache keeps entries on the heap but avoids GC overhead for them. To achieve that, all operations work on byte arrays,
so entries must be (de)serialized in front of the cache in most use cases.
## Usage
### Simple initialization
```go
import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
cache.Set("my-unique-key", []byte("value"))
entry, _ := cache.Get("my-unique-key")
fmt.Println(string(entry))
```
### Custom initialization
When the cache load can be predicted in advance, custom initialization is preferable because it avoids additional memory allocations.
```go
import (
	"fmt"
	"log"
	"time"

	"github.com/allegro/bigcache"
)

config := bigcache.Config{
// number of shards (must be a power of 2)
Shards: 1024,
// time after which entry can be evicted
LifeWindow: 10 * time.Minute,
// rps * lifeWindow, used only in initial memory allocation
MaxEntriesInWindow: 1000 * 10 * 60,
// max entry size in bytes, used only in initial memory allocation
MaxEntrySize: 500,
// prints information about additional memory allocation
Verbose: true,
// cache will not allocate more memory than this limit, value in MB
// if value is reached then the oldest entries can be overridden for the new ones
// 0 value means no size limit
HardMaxCacheSize: 8192,
// callback fired when the oldest entry is removed because of its expiration time or no space left
// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
OnRemove: nil,
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
// Ignored if OnRemove is specified.
OnRemoveWithReason: nil,
}
cache, initErr := bigcache.NewBigCache(config)
if initErr != nil {
log.Fatal(initErr)
}
cache.Set("my-unique-key", []byte("value"))
if entry, err := cache.Get("my-unique-key"); err == nil {
fmt.Println(string(entry))
}
```
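As a sketch of the removal callback described in the config comments above (assuming `OnRemoveWithReason` has the signature `func(key string, entry []byte, reason bigcache.RemoveReason)`, with the `RemoveReason` constants defined in `bigcache.go` further down in this diff):

```go
config := bigcache.DefaultConfig(10 * time.Minute)
config.OnRemoveWithReason = func(key string, entry []byte, reason bigcache.RemoveReason) {
	// reason is one of bigcache.Expired, bigcache.NoSpace or bigcache.Deleted.
	log.Printf("entry %q (%d bytes) removed: reason %d", key, len(entry), reason)
}
cache, err := bigcache.NewBigCache(config)
if err != nil {
	log.Fatal(err)
}
```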
## Benchmarks
Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
The benchmarks were run on an i7-6700K with 32GB of RAM under Windows 10.
### Writes and reads
```bash
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
BenchmarkMapSet-8 3000000 569 ns/op 202 B/op 3 allocs/op
BenchmarkConcurrentMapSet-8 1000000 1592 ns/op 347 B/op 8 allocs/op
BenchmarkFreeCacheSet-8 3000000 775 ns/op 355 B/op 2 allocs/op
BenchmarkBigCacheSet-8 3000000 640 ns/op 303 B/op 2 allocs/op
BenchmarkMapGet-8 5000000 407 ns/op 24 B/op 1 allocs/op
BenchmarkConcurrentMapGet-8 3000000 558 ns/op 24 B/op 2 allocs/op
BenchmarkFreeCacheGet-8 2000000 682 ns/op 136 B/op 2 allocs/op
BenchmarkBigCacheGet-8 3000000 512 ns/op 152 B/op 4 allocs/op
BenchmarkBigCacheSetParallel-8 10000000 225 ns/op 313 B/op 3 allocs/op
BenchmarkFreeCacheSetParallel-8 10000000 218 ns/op 341 B/op 3 allocs/op
BenchmarkConcurrentMapSetParallel-8 5000000 318 ns/op 200 B/op 6 allocs/op
BenchmarkBigCacheGetParallel-8 20000000 178 ns/op 152 B/op 4 allocs/op
BenchmarkFreeCacheGetParallel-8 20000000 295 ns/op 136 B/op 3 allocs/op
BenchmarkConcurrentMapGetParallel-8 10000000 237 ns/op 24 B/op 2 allocs/op
```
Writes and reads in bigcache are faster than in freecache.
Writes to map are the slowest.
### GC pause time
```bash
cd caches_bench; go run caches_gc_overhead_comparison.go
Number of entries: 20000000
GC pause for bigcache: 5.8658ms
GC pause for freecache: 32.4341ms
GC pause for map: 52.9661ms
```
The test shows how long the GC pauses are for caches filled with 20 million entries.
Bigcache and freecache have very similar GC pause times, and both clearly reduce GC
overhead compared to map, whose pause is roughly an order of magnitude longer in the run above.
## How it works
BigCache relies on an optimization introduced in Go 1.5 ([issue-9477](https://github.com/golang/go/issues/9477)):
if a map contains no pointers in its keys or values, the GC omits scanning its contents.
BigCache therefore uses a `map[uint64]uint32` in which keys are hashes and values are offsets of entries.
The entries themselves are kept in a byte array, again hidden from the GC.
The byte array can grow to gigabytes without impacting performance,
because the GC only sees a single pointer to it.
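To make the optimization concrete, here is a minimal, self-contained sketch (not part of BigCache) that compares GC pause time for a pointer-free map against a map whose values are pointers:

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

// gcPause forces a collection and reports how long it took.
func gcPause() time.Duration {
	start := time.Now()
	runtime.GC()
	return time.Since(start)
}

func main() {
	const n = 5000000

	// Pointer-free map: the GC skips scanning its contents.
	flat := make(map[uint64]uint32, n)
	for i := uint64(0); i < n; i++ {
		flat[i] = uint32(i)
	}
	fmt.Println("GC with map[uint64]uint32: ", gcPause())
	runtime.KeepAlive(flat)

	// Map holding pointers: every bucket is scanned on each cycle.
	boxed := make(map[uint64]*uint32, n)
	for i := uint64(0); i < n; i++ {
		v := uint32(i)
		boxed[i] = &v
	}
	fmt.Println("GC with map[uint64]*uint32:", gcPause())
	runtime.KeepAlive(boxed)
}
```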
## Bigcache vs Freecache
Both caches provide the same core features, but they reduce GC overhead in different ways:
bigcache relies on `map[uint64]uint32`, while freecache implements its own mapping built on
slices to reduce the number of pointers.
Results from the benchmark tests are presented above.
One advantage of bigcache over freecache is that you don't need to know
the size of the cache in advance: when bigcache is full,
it can allocate additional memory for new entries instead of
overwriting existing ones as freecache currently does.
A hard maximum size can nevertheless be set; check [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
## HTTP Server
This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
## More
The genesis of bigcache is described in the allegro.tech blog post [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html).
## License
BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))

vendor/github.com/allegro/bigcache/bigcache.go

@@ -1,202 +0,0 @@
package bigcache
import (
"fmt"
"time"
)
const (
minimumEntriesInShard = 10 // Minimum number of entries in single shard
)
// BigCache is a fast, concurrent, evicting cache created to keep a big number of entries without impacting
// performance. It keeps entries on the heap but omits GC for them. To achieve that, operations take place
// on byte arrays, so entries must be (de)serialized in front of the cache in most use cases.
type BigCache struct {
shards []*cacheShard
lifeWindow uint64
clock clock
hash Hasher
config Config
shardMask uint64
maxShardSize uint32
close chan struct{}
}
// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
type RemoveReason uint32
const (
// Expired means the key is past its LifeWindow.
Expired RemoveReason = iota
// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
// entry exceeded the maximum shard size.
NoSpace
// Deleted means Delete was called and this key was removed as a result.
Deleted
)
// NewBigCache initializes a new instance of BigCache
func NewBigCache(config Config) (*BigCache, error) {
return newBigCache(config, &systemClock{})
}
func newBigCache(config Config, clock clock) (*BigCache, error) {
if !isPowerOfTwo(config.Shards) {
return nil, fmt.Errorf("Shards number must be power of two")
}
if config.Hasher == nil {
config.Hasher = newDefaultHasher()
}
cache := &BigCache{
shards: make([]*cacheShard, config.Shards),
lifeWindow: uint64(config.LifeWindow.Seconds()),
clock: clock,
hash: config.Hasher,
config: config,
shardMask: uint64(config.Shards - 1),
maxShardSize: uint32(config.maximumShardSize()),
close: make(chan struct{}),
}
var onRemove func(wrappedEntry []byte, reason RemoveReason)
if config.OnRemove != nil {
onRemove = cache.providedOnRemove
} else if config.OnRemoveWithReason != nil {
onRemove = cache.providedOnRemoveWithReason
} else {
onRemove = cache.notProvidedOnRemove
}
for i := 0; i < config.Shards; i++ {
cache.shards[i] = initNewShard(config, onRemove, clock)
}
if config.CleanWindow > 0 {
go func() {
ticker := time.NewTicker(config.CleanWindow)
defer ticker.Stop()
for {
select {
case t := <-ticker.C:
cache.cleanUp(uint64(t.Unix()))
case <-cache.close:
return
}
}
}()
}
return cache, nil
}
// Close is used to signal a shutdown of the cache when you are done with it.
// This allows the cleaning goroutine to exit and ensures that references
// to the cache are not kept, which would prevent GC of the entire cache.
func (c *BigCache) Close() error {
close(c.close)
return nil
}
// Get reads the entry for the key.
// It returns ErrEntryNotFound when
// no entry exists for the given key.
func (c *BigCache) Get(key string) ([]byte, error) {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.get(key, hashedKey)
}
// Set saves the entry under the key
func (c *BigCache) Set(key string, entry []byte) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.set(key, hashedKey, entry)
}
// Delete removes the key
func (c *BigCache) Delete(key string) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.del(key, hashedKey)
}
// Reset empties all cache shards
func (c *BigCache) Reset() error {
for _, shard := range c.shards {
shard.reset(c.config)
}
return nil
}
// Len computes the number of entries in the cache
func (c *BigCache) Len() int {
var entries int
for _, shard := range c.shards {
entries += shard.len()
}
return entries
}
// Capacity returns the number of bytes stored in the cache.
func (c *BigCache) Capacity() int {
var bytes int
for _, shard := range c.shards {
bytes += shard.capacity()
}
return bytes
}
// Stats returns cache's statistics
func (c *BigCache) Stats() Stats {
var s Stats
for _, shard := range c.shards {
tmp := shard.getStats()
s.Hits += tmp.Hits
s.Misses += tmp.Misses
s.DelHits += tmp.DelHits
s.DelMisses += tmp.DelMisses
s.Collisions += tmp.Collisions
}
return s
}
// Iterator returns an iterator over the EntryInfo entries of the whole cache.
func (c *BigCache) Iterator() *EntryInfoIterator {
return newIterator(c)
}
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > c.lifeWindow {
evict(Expired)
return true
}
return false
}
func (c *BigCache) cleanUp(currentTimestamp uint64) {
for _, shard := range c.shards {
shard.cleanUp(currentTimestamp)
}
}
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
return c.shards[hashedKey&c.shardMask]
}
func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
}
func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
}
}
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
}
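A minimal usage sketch of the API defined above, assuming the standard `fmt`, `log`, and `time` imports:
```go
cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
if err != nil {
	log.Fatal(err)
}
// Close stops the clean-up goroutine (started when CleanWindow > 0)
// and releases references so the cache itself can be collected.
defer cache.Close()

if err := cache.Set("my-key", []byte("my-value")); err != nil {
	log.Fatal(err)
}

entry, err := cache.Get("my-key")
if err != nil {
	log.Fatal(err) // ErrEntryNotFound when the key is absent or evicted
}
fmt.Println(string(entry)) // prints: my-value
```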

@ -1,167 +0,0 @@
package bigcache
import (
"fmt"
"math/rand"
"strconv"
"testing"
"time"
)
var message = blob('a', 256)
func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
writeToCache(b, 1, 100*time.Second, b.N)
}
func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
m := blob('a', 1024)
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 100 * time.Second,
MaxEntriesInWindow: 100,
MaxEntrySize: 256,
HardMaxCacheSize: 1,
})
b.ReportAllocs()
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
}
func BenchmarkWriteToUnlimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
m := blob('a', 1024)
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 100 * time.Second,
MaxEntriesInWindow: 100,
MaxEntrySize: 256,
})
b.ReportAllocs()
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
}
func BenchmarkWriteToCache(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
writeToCache(b, shards, 100*time.Second, b.N)
})
}
}
func BenchmarkReadFromCache(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
readFromCache(b, 1024)
})
}
}
func BenchmarkIterateOverCache(b *testing.B) {
m := blob('a', 1)
for _, shards := range []int{512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
for i := 0; i < b.N; i++ {
cache.Set(fmt.Sprintf("key-%d", i), m)
}
b.ResetTimer()
it := cache.Iterator()
b.RunParallel(func(pb *testing.PB) {
b.ReportAllocs()
for pb.Next() {
if it.SetNext() {
it.Value()
}
}
})
})
}
}
func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
writeToCache(b, 1024, 100*time.Second, 100)
}
func BenchmarkReadFromCacheNonExistentKeys(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
readFromCacheNonExistentKeys(b, 1024)
})
}
}
func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: lifeWindow,
MaxEntriesInWindow: max(requestsInLifeWindow, 100),
MaxEntrySize: 500,
})
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Int()
counter := 0
b.ReportAllocs()
for pb.Next() {
cache.Set(fmt.Sprintf("key-%d-%d", id, counter), message)
counter = counter + 1
}
})
}
func readFromCache(b *testing.B, shards int) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
for i := 0; i < b.N; i++ {
cache.Set(strconv.Itoa(i), message)
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
b.ReportAllocs()
for pb.Next() {
cache.Get(strconv.Itoa(rand.Intn(b.N)))
}
})
}
func readFromCacheNonExistentKeys(b *testing.B, shards int) {
cache, _ := NewBigCache(Config{
Shards: shards,
LifeWindow: 1000 * time.Second,
MaxEntriesInWindow: max(b.N, 100),
MaxEntrySize: 500,
})
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
b.ReportAllocs()
for pb.Next() {
cache.Get(strconv.Itoa(rand.Intn(b.N)))
}
})
}
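As a side note, these benchmarks run with the standard Go tooling; the flags below are the usual ones and are not prescribed anywhere in the repo:
```bash
go test -bench=. -benchmem github.com/allegro/bigcache
```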

@ -1,734 +0,0 @@
package bigcache
import (
"bytes"
"fmt"
"math/rand"
"runtime"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var sink []byte
func TestParallel(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
value := []byte("value")
var wg sync.WaitGroup
wg.Add(3)
keys := 1337
// when
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), value)
}
}()
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
sink, _ = cache.Get(fmt.Sprintf("key%d", i))
}
}()
go func() {
defer wg.Done()
for i := 0; i < keys; i++ {
cache.Delete(fmt.Sprintf("key%d", i))
}
}()
// then
wg.Wait()
}
func TestWriteAndGetOnCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
value := []byte("value")
// when
cache.Set("key", value)
cachedValue, err := cache.Get("key")
// then
assert.NoError(t, err)
assert.Equal(t, value, cachedValue)
}
func TestConstructCacheWithDefaultHasher(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
assert.IsType(t, fnv64a{}, cache.hash)
}
func TestWillReturnErrorOnInvalidNumberOfPartitions(t *testing.T) {
t.Parallel()
// given
cache, err := NewBigCache(Config{
Shards: 18,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
assert.Nil(t, cache)
assert.Error(t, err, "Shards number must be power of two")
}
func TestEntryNotFound(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
})
// when
_, err := cache.Get("nonExistingKey")
// then
assert.EqualError(t, err, ErrEntryNotFound.Error())
}
func TestTimingEviction(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
_, err := cache.Get("key")
// then
assert.EqualError(t, err, ErrEntryNotFound.Error())
}
func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 4,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value 2"))
value, err := cache.Get("key")
// then
assert.NoError(t, err, ErrEntryNotFound.Error())
assert.Equal(t, []byte("value"), value)
}
func TestCleanShouldEvictAll(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 4,
LifeWindow: time.Second,
CleanWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
cache.Set("key", []byte("value"))
<-time.After(3 * time.Second)
value, err := cache.Get("key")
// then
assert.EqualError(t, err, ErrEntryNotFound.Error())
assert.Equal(t, value, []byte(nil))
}
func TestOnRemoveCallback(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
onRemoveInvoked := false
onRemoveExtInvoked := false
onRemove := func(key string, entry []byte) {
onRemoveInvoked = true
assert.Equal(t, "key", key)
assert.Equal(t, []byte("value"), entry)
}
onRemoveExt := func(key string, entry []byte, reason RemoveReason) {
onRemoveExtInvoked = true
}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
OnRemove: onRemove,
OnRemoveWithReason: onRemoveExt,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
// then
assert.True(t, onRemoveInvoked)
assert.False(t, onRemoveExtInvoked)
}
func TestOnRemoveWithReasonCallback(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
onRemoveInvoked := false
onRemove := func(key string, entry []byte, reason RemoveReason) {
onRemoveInvoked = true
assert.Equal(t, "key", key)
assert.Equal(t, []byte("value"), entry)
assert.Equal(t, reason, RemoveReason(Expired))
}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
OnRemoveWithReason: onRemove,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
// then
assert.True(t, onRemoveInvoked)
}
func TestOnRemoveFilter(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
onRemoveInvoked := false
onRemove := func(key string, entry []byte, reason RemoveReason) {
onRemoveInvoked = true
}
c := Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
OnRemoveWithReason: onRemove,
}.OnRemoveFilterSet(Deleted, NoSpace)
cache, _ := newBigCache(c, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key2", []byte("value2"))
// then
assert.False(t, onRemoveInvoked)
// and when
cache.Delete("key2")
// then
assert.True(t, onRemoveInvoked)
}
func TestCacheLen(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
}
func TestCacheStats(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
for i := 0; i < 100; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
for i := 0; i < 10; i++ {
value, err := cache.Get(fmt.Sprintf("key%d", i))
assert.Nil(t, err)
assert.Equal(t, string(value), "value")
}
for i := 100; i < 110; i++ {
_, err := cache.Get(fmt.Sprintf("key%d", i))
assert.Error(t, err)
}
for i := 10; i < 20; i++ {
err := cache.Delete(fmt.Sprintf("key%d", i))
assert.Nil(t, err)
}
for i := 110; i < 120; i++ {
err := cache.Delete(fmt.Sprintf("key%d", i))
assert.Error(t, err)
}
// then
stats := cache.Stats()
assert.Equal(t, stats.Hits, int64(10))
assert.Equal(t, stats.Misses, int64(10))
assert.Equal(t, stats.DelHits, int64(10))
assert.Equal(t, stats.DelMisses, int64(10))
}
func TestCacheDel(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(DefaultConfig(time.Second))
// when
err := cache.Delete("nonExistingKey")
// then
assert.Equal(t, err.Error(), ErrEntryNotFound.Error())
// and when
cache.Set("existingKey", nil)
err = cache.Delete("existingKey")
cachedValue, _ := cache.Get("existingKey")
// then
assert.Nil(t, err)
assert.Len(t, cachedValue, 0)
}
// TestCacheDelRandomly does simultaneous deletes, puts and gets, to check for corruption errors.
func TestCacheDelRandomly(t *testing.T) {
t.Parallel()
c := Config{
Shards: 1,
LifeWindow: time.Second,
CleanWindow: 0,
MaxEntriesInWindow: 10,
MaxEntrySize: 10,
Verbose: true,
Hasher: newDefaultHasher(),
HardMaxCacheSize: 1,
Logger: DefaultLogger(),
}
//c.Hasher = hashStub(5)
cache, _ := NewBigCache(c)
var wg sync.WaitGroup
var ntest = 800000
wg.Add(1)
go func() {
for i := 0; i < ntest; i++ {
r := uint8(rand.Int())
key := fmt.Sprintf("thekey%d", r)
cache.Delete(key)
}
wg.Done()
}()
wg.Add(1)
go func() {
val := make([]byte, 1024)
for i := 0; i < ntest; i++ {
r := byte(rand.Int())
key := fmt.Sprintf("thekey%d", r)
for j := 0; j < len(val); j++ {
val[j] = r
}
cache.Set(key, val)
}
wg.Done()
}()
wg.Add(1)
go func() {
val := make([]byte, 1024)
for i := 0; i < ntest; i++ {
r := byte(rand.Int())
key := fmt.Sprintf("thekey%d", r)
for j := 0; j < len(val); j++ {
val[j] = r
}
if got, err := cache.Get(key); err == nil && !bytes.Equal(got, val) {
t.Errorf("got %s ->\n %x\n expected:\n %x\n ", key, got, val)
}
}
wg.Done()
}()
wg.Wait()
}
func TestCacheReset(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
// and when
cache.Reset()
// then
assert.Equal(t, 0, cache.Len())
// and when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
// then
assert.Equal(t, keys, cache.Len())
}
func TestIterateOnResetCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
cache.Reset()
// then
iterator := cache.Iterator()
assert.Equal(t, false, iterator.SetNext())
}
func TestGetOnResetCache(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
keys := 1337
// when
for i := 0; i < keys; i++ {
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
}
cache.Reset()
// then
value, err := cache.Get("key1")
assert.Equal(t, err.Error(), ErrEntryNotFound.Error())
assert.Equal(t, value, []byte(nil))
}
func TestEntryUpdate(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 1,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
// when
cache.Set("key", []byte("value"))
clock.set(5)
cache.Set("key", []byte("value2"))
clock.set(7)
cache.Set("key2", []byte("value3"))
cachedValue, _ := cache.Get("key")
// then
assert.Equal(t, []byte("value2"), cachedValue)
}
func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
cache.Set("key1", blob('a', 1024*400))
cache.Set("key2", blob('b', 1024*400))
cache.Set("key3", blob('c', 1024*800))
_, key1Err := cache.Get("key1")
_, key2Err := cache.Get("key2")
entry3, _ := cache.Get("key3")
// then
assert.EqualError(t, key1Err, ErrEntryNotFound.Error())
assert.EqualError(t, key2Err, ErrEntryNotFound.Error())
assert.Equal(t, blob('c', 1024*800), entry3)
}
func TestRetrievingEntryShouldCopy(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
cache.Set("key1", blob('a', 1024*400))
value, key1Err := cache.Get("key1")
// when
// override queue
cache.Set("key2", blob('b', 1024*400))
cache.Set("key3", blob('c', 1024*400))
cache.Set("key4", blob('d', 1024*400))
cache.Set("key5", blob('d', 1024*400))
// then
assert.Nil(t, key1Err)
assert.Equal(t, blob('a', 1024*400), value)
}
func TestEntryBiggerThanMaxShardSizeError(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
err := cache.Set("key1", blob('a', 1024*1025))
// then
assert.EqualError(t, err, "entry is bigger than max shard size")
}
func TestHashCollision(t *testing.T) {
t.Parallel()
ml := &mockedLogger{}
// given
cache, _ := NewBigCache(Config{
Shards: 16,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
Verbose: true,
Hasher: hashStub(5),
Logger: ml,
})
// when
cache.Set("liquid", []byte("value"))
cachedValue, err := cache.Get("liquid")
// then
assert.NoError(t, err)
assert.Equal(t, []byte("value"), cachedValue)
// when
cache.Set("costarring", []byte("value 2"))
cachedValue, err = cache.Get("costarring")
// then
assert.NoError(t, err)
assert.Equal(t, []byte("value 2"), cachedValue)
// when
cachedValue, err = cache.Get("liquid")
// then
assert.Error(t, err)
assert.Nil(t, cachedValue)
assert.NotEqual(t, "", ml.lastFormat)
assert.Equal(t, cache.Stats().Collisions, int64(1))
}
func TestNilValueCaching(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})
// when
cache.Set("Kierkegaard", []byte{})
cachedValue, err := cache.Get("Kierkegaard")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
// when
cache.Set("Sartre", nil)
cachedValue, err = cache.Get("Sartre")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
// when
cache.Set("Nietzsche", []byte(nil))
cachedValue, err = cache.Get("Nietzsche")
// then
assert.NoError(t, err)
assert.Equal(t, []byte{}, cachedValue)
}
func TestClosing(t *testing.T) {
// given
config := Config{
CleanWindow: time.Minute,
}
startGR := runtime.NumGoroutine()
// when
for i := 0; i < 100; i++ {
cache, _ := NewBigCache(config)
cache.Close()
}
// wait till all goroutines are stopped.
time.Sleep(200 * time.Millisecond)
// then
endGR := runtime.NumGoroutine()
assert.True(t, endGR >= startGR)
assert.InDelta(t, endGR, startGR, 25)
}
type mockedLogger struct {
lastFormat string
lastArgs []interface{}
}
func (ml *mockedLogger) Printf(format string, v ...interface{}) {
ml.lastFormat = format
ml.lastArgs = v
}
type mockedClock struct {
value int64
}
func (mc *mockedClock) epoch() int64 {
return mc.value
}
func (mc *mockedClock) set(value int64) {
mc.value = value
}
func blob(char byte, len int) []byte {
b := make([]byte, len)
for index := range b {
b[index] = char
}
return b
}

@ -1,14 +0,0 @@
// +build !appengine
package bigcache
import (
"reflect"
"unsafe"
)
// bytesToString converts a byte slice to a string without copying, by
// aliasing the slice's backing array; the caller must not mutate the
// slice afterwards.
func bytesToString(b []byte) string {
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
return *(*string)(unsafe.Pointer(&strHeader))
}

@ -1,7 +0,0 @@
// +build appengine
package bigcache
// bytesToString converts a byte slice to a string with a copy; the
// zero-copy unsafe variant is not available on App Engine.
func bytesToString(b []byte) string {
return string(b)
}

@ -1,219 +0,0 @@
package main
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
"github.com/allegro/bigcache"
"github.com/coocood/freecache"
)
const maxEntrySize = 256
func BenchmarkMapSet(b *testing.B) {
m := make(map[string][]byte, b.N)
for i := 0; i < b.N; i++ {
m[key(i)] = value()
}
}
func BenchmarkConcurrentMapSet(b *testing.B) {
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
}
func BenchmarkFreeCacheSet(b *testing.B) {
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
}
func BenchmarkBigCacheSet(b *testing.B) {
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
}
func BenchmarkMapGet(b *testing.B) {
b.StopTimer()
m := make(map[string][]byte)
for i := 0; i < b.N; i++ {
m[key(i)] = value()
}
b.StartTimer()
hitCount := 0
for i := 0; i < b.N; i++ {
if m[key(i)] != nil {
hitCount++
}
}
}
func BenchmarkConcurrentMapGet(b *testing.B) {
b.StopTimer()
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
b.StartTimer()
hitCounter := 0
for i := 0; i < b.N; i++ {
_, ok := m.Load(key(i))
if ok {
hitCounter++
}
}
}
func BenchmarkFreeCacheGet(b *testing.B) {
b.StopTimer()
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
cache.Get([]byte(key(i)))
}
}
func BenchmarkBigCacheGet(b *testing.B) {
b.StopTimer()
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
b.StartTimer()
for i := 0; i < b.N; i++ {
cache.Get(key(i))
}
}
func BenchmarkBigCacheSetParallel(b *testing.B) {
cache := initBigCache(b.N)
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
counter := 0
for pb.Next() {
cache.Set(parallelKey(id, counter), value())
counter = counter + 1
}
})
}
func BenchmarkFreeCacheSetParallel(b *testing.B) {
cache := freecache.NewCache(b.N * maxEntrySize)
rand.Seed(time.Now().Unix())
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
counter := 0
for pb.Next() {
cache.Set([]byte(parallelKey(id, counter)), value(), 0)
counter = counter + 1
}
})
}
func BenchmarkConcurrentMapSetParallel(b *testing.B) {
var m sync.Map
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
for pb.Next() {
m.Store(key(id), value())
}
})
}
func BenchmarkBigCacheGetParallel(b *testing.B) {
b.StopTimer()
cache := initBigCache(b.N)
for i := 0; i < b.N; i++ {
cache.Set(key(i), value())
}
b.StartTimer()
b.RunParallel(func(pb *testing.PB) {
counter := 0
for pb.Next() {
cache.Get(key(counter))
counter = counter + 1
}
})
}
func BenchmarkFreeCacheGetParallel(b *testing.B) {
b.StopTimer()
cache := freecache.NewCache(b.N * maxEntrySize)
for i := 0; i < b.N; i++ {
cache.Set([]byte(key(i)), value(), 0)
}
b.StartTimer()
b.RunParallel(func(pb *testing.PB) {
counter := 0
for pb.Next() {
cache.Get([]byte(key(counter)))
counter = counter + 1
}
})
}
func BenchmarkConcurrentMapGetParallel(b *testing.B) {
b.StopTimer()
var m sync.Map
for i := 0; i < b.N; i++ {
m.Store(key(i), value())
}
b.StartTimer()
hitCount := 0
b.RunParallel(func(pb *testing.PB) {
id := rand.Intn(1000)
for pb.Next() {
_, ok := m.Load(key(id))
if ok {
hitCount++
}
}
})
}
func key(i int) string {
return fmt.Sprintf("key-%010d", i)
}
func value() []byte {
return make([]byte, 100)
}
func parallelKey(threadID int, counter int) string {
return fmt.Sprintf("key-%04d-%06d", threadID, counter)
}
func initBigCache(entriesInWindow int) *bigcache.BigCache {
cache, _ := bigcache.NewBigCache(bigcache.Config{
Shards: 256,
LifeWindow: 10 * time.Minute,
MaxEntriesInWindow: entriesInWindow,
MaxEntrySize: maxEntrySize,
Verbose: true,
})
return cache
}

@ -1,96 +0,0 @@
package main
import (
"fmt"
"runtime"
"runtime/debug"
"time"
"github.com/allegro/bigcache"
"github.com/coocood/freecache"
)
func gcPause() time.Duration {
runtime.GC()
var stats debug.GCStats
debug.ReadGCStats(&stats)
return stats.PauseTotal
}
const (
entries = 20000000
valueSize = 100
)
func main() {
debug.SetGCPercent(10)
fmt.Println("Number of entries: ", entries)
config := bigcache.Config{
Shards: 256,
LifeWindow: 100 * time.Minute,
MaxEntriesInWindow: entries,
MaxEntrySize: 200,
Verbose: true,
}
bigcache, _ := bigcache.NewBigCache(config)
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
bigcache.Set(key, val)
}
firstKey, _ := generateKeyValue(1, valueSize)
checkFirstElement(bigcache.Get(firstKey))
fmt.Println("GC pause for bigcache: ", gcPause())
bigcache = nil
gcPause()
//------------------------------------------
freeCache := freecache.NewCache(entries * 200) //allocate entries * 200 bytes
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
if err := freeCache.Set([]byte(key), val, 0); err != nil {
fmt.Println("Error in set: ", err.Error())
}
}
firstKey, _ = generateKeyValue(1, valueSize)
checkFirstElement(freeCache.Get([]byte(firstKey)))
if freeCache.OverwriteCount() != 0 {
fmt.Println("Overwritten: ", freeCache.OverwriteCount())
}
fmt.Println("GC pause for freecache: ", gcPause())
freeCache = nil
gcPause()
//------------------------------------------
mapCache := make(map[string][]byte)
for i := 0; i < entries; i++ {
key, val := generateKeyValue(i, valueSize)
mapCache[key] = val
}
fmt.Println("GC pause for map: ", gcPause())
}
func checkFirstElement(val []byte, err error) {
_, expectedVal := generateKeyValue(1, valueSize)
if err != nil {
fmt.Println("Error in get: ", err.Error())
} else if string(val) != string(expectedVal) {
fmt.Println("Wrong first element: ", string(val))
}
}
func generateKeyValue(index int, valSize int) (string, []byte) {
key := fmt.Sprintf("key-%010d", index)
fixedNumber := []byte(fmt.Sprintf("%010d", index))
val := append(make([]byte, valSize-10), fixedNumber...)
return key, val
}

@ -1,14 +0,0 @@
package bigcache
import "time"
type clock interface {
epoch() int64
}
type systemClock struct {
}
func (c systemClock) epoch() int64 {
return time.Now().Unix()
}

@ -1,86 +0,0 @@
package bigcache
import "time"
// Config for BigCache
type Config struct {
// Number of cache shards, value must be a power of two
Shards int
// Time after which entry can be evicted
LifeWindow time.Duration
// Interval between removing expired entries (clean up).
// If set to <= 0 then no action is performed. Setting it to less than 1 second is counterproductive, as bigcache has a one-second resolution.
CleanWindow time.Duration
// Max number of entries in life window. Used only to calculate initial size for cache shards.
// When a proper value is set, additional memory allocation does not occur.
MaxEntriesInWindow int
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
MaxEntrySize int
// Verbose mode prints information about new memory allocation
Verbose bool
// Hasher used to map string keys to unsigned 64-bit integers; by default fnv64a hashing is used.
Hasher Hasher
// HardMaxCacheSize is a limit for the cache size in MB. The cache will not allocate more memory than this limit.
// It can protect the application from consuming all available memory on the machine and thus from the OOM killer.
// The default value is 0, which means unlimited size. When the limit is greater than 0 and reached,
// the oldest entries are overwritten by new ones.
HardMaxCacheSize int
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time, because there is no space left
// for the new entry, or because Delete was called.
// The default value is nil, which means no callback; this also avoids unwrapping the oldest entry.
OnRemove func(key string, entry []byte)
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time, because there is no space left
// for the new entry, or because Delete was called. A constant representing the reason will be passed through.
// The default value is nil, which means no callback; this also avoids unwrapping the oldest entry.
// Ignored if OnRemove is specified.
OnRemoveWithReason func(key string, entry []byte, reason RemoveReason)
onRemoveFilter int
// Logger is a logging interface used in combination with `Verbose`.
// Defaults to `DefaultLogger()`
Logger Logger
}
// DefaultConfig initializes the config with default values.
// When the load on BigCache can be predicted in advance, it is better to use a custom config.
func DefaultConfig(eviction time.Duration) Config {
return Config{
Shards: 1024,
LifeWindow: eviction,
CleanWindow: 0,
MaxEntriesInWindow: 1000 * 10 * 60,
MaxEntrySize: 500,
Verbose: true,
Hasher: newDefaultHasher(),
HardMaxCacheSize: 0,
Logger: DefaultLogger(),
}
}
// initialShardSize computes initial shard size
func (c Config) initialShardSize() int {
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
}
// maximumShardSize computes maximum shard size
func (c Config) maximumShardSize() int {
maxShardSize := 0
if c.HardMaxCacheSize > 0 {
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
}
return maxShardSize
}
// OnRemoveFilterSet sets which removal reasons will trigger a call to OnRemoveWithReason.
// Filtering out reasons prevents bigcache from unwrapping them, which saves CPU.
func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config {
c.onRemoveFilter = 0
for i := range reasons {
c.onRemoveFilter |= 1 << uint(reasons[i])
}
return c
}
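A short sketch of wiring the removal callback together with the reason filter defined above; the field values are illustrative, and the snippet assumes the `bigcache`, `log`, and `time` imports:
```go
config := bigcache.Config{
	Shards:             64,
	LifeWindow:         time.Minute,
	MaxEntriesInWindow: 1000,
	MaxEntrySize:       256,
	OnRemoveWithReason: func(key string, entry []byte, reason bigcache.RemoveReason) {
		log.Printf("removed %q (reason %d)", key, reason)
	},
}.OnRemoveFilterSet(bigcache.Deleted, bigcache.NoSpace) // skip Expired to save CPU

cache, err := bigcache.NewBigCache(config)
if err != nil {
	log.Fatal(err)
}
```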

@ -1,62 +0,0 @@
package bigcache
import (
"encoding/binary"
)
const (
timestampSizeInBytes = 8 // Number of bytes used for timestamp
hashSizeInBytes = 8 // Number of bytes used for hash
keySizeInBytes = 2 // Number of bytes used for size of entry key
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
)
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
keyLength := len(key)
blobLength := len(entry) + headersSizeInBytes + keyLength
if blobLength > len(*buffer) {
*buffer = make([]byte, blobLength)
}
blob := *buffer
binary.LittleEndian.PutUint64(blob, timestamp)
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
copy(blob[headersSizeInBytes:], key)
copy(blob[headersSizeInBytes+keyLength:], entry)
return blob[:blobLength]
}
func readEntry(data []byte) []byte {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
copy(dst, data[headersSizeInBytes+length:])
return dst
}
func readTimestampFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data)
}
func readKeyFromEntry(data []byte) string {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, length)
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
return bytesToString(dst)
}
func readHashFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
}
func resetKeyFromEntry(data []byte) {
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
}

@ -1,46 +0,0 @@
package bigcache
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestEncodeDecode(t *testing.T) {
// given
now := uint64(time.Now().Unix())
hash := uint64(42)
key := "key"
data := []byte("data")
buffer := make([]byte, 100)
// when
wrapped := wrapEntry(now, hash, key, data, &buffer)
// then
assert.Equal(t, key, readKeyFromEntry(wrapped))
assert.Equal(t, hash, readHashFromEntry(wrapped))
assert.Equal(t, now, readTimestampFromEntry(wrapped))
assert.Equal(t, data, readEntry(wrapped))
assert.Equal(t, 100, len(buffer))
}
func TestAllocateBiggerBuffer(t *testing.T) {
//given
now := uint64(time.Now().Unix())
hash := uint64(42)
key := "1"
data := []byte("2")
buffer := make([]byte, 1)
// when
wrapped := wrapEntry(now, hash, key, data, &buffer)
// then
assert.Equal(t, key, readKeyFromEntry(wrapped))
assert.Equal(t, hash, readHashFromEntry(wrapped))
assert.Equal(t, now, readTimestampFromEntry(wrapped))
assert.Equal(t, data, readEntry(wrapped))
assert.Equal(t, 2+headersSizeInBytes, len(buffer))
}

@ -1,6 +0,0 @@
package bigcache
import "errors"
// ErrEntryNotFound is returned when an entry is not found for the provided key
var ErrEntryNotFound = errors.New("Entry not found")

@ -1,28 +0,0 @@
package bigcache
// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
// Its Sum64 method will lay the value out in big-endian byte order.
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
func newDefaultHasher() Hasher {
return fnv64a{}
}
type fnv64a struct{}
const (
// offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
offset64 = 14695981039346656037
// prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
prime64 = 1099511628211
)
// Sum64 gets the string and returns its uint64 hash value.
func (f fnv64a) Sum64(key string) uint64 {
var hash uint64 = offset64
for i := 0; i < len(key); i++ {
hash ^= uint64(key[i])
hash *= prime64
}
return hash
}

@ -1,18 +0,0 @@
package bigcache
import "testing"
var text = "abcdefg"
func BenchmarkFnvHashSum64(b *testing.B) {
h := newDefaultHasher()
for i := 0; i < b.N; i++ {
h.Sum64(text)
}
}
func BenchmarkFnvHashStdLibSum64(b *testing.B) {
for i := 0; i < b.N; i++ {
stdLibFnvSum64(text)
}
}

@ -1,35 +0,0 @@
package bigcache
import (
"hash/fnv"
"testing"
)
type testCase struct {
text string
expectedHash uint64
}
var testCases = []testCase{
{"", stdLibFnvSum64("")},
{"a", stdLibFnvSum64("a")},
{"ab", stdLibFnvSum64("ab")},
{"abc", stdLibFnvSum64("abc")},
{"some longer and more complicated text", stdLibFnvSum64("some longer and more complicated text")},
}
func TestFnvHashSum64(t *testing.T) {
h := newDefaultHasher()
for _, testCase := range testCases {
hashed := h.Sum64(testCase.text)
if hashed != testCase.expectedHash {
t.Errorf("hash(%q) = %d want %d", testCase.text, hashed, testCase.expectedHash)
}
}
}
func stdLibFnvSum64(key string) uint64 {
h := fnv.New64a()
h.Write([]byte(key))
return h.Sum64()
}

@ -1,9 +0,0 @@
module github.com/allegro/bigcache
go 1.12
require (
github.com/cespare/xxhash v1.1.0 // indirect
github.com/coocood/freecache v1.1.0
github.com/stretchr/testify v1.3.0
)

@ -1,13 +0,0 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA=
github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

@ -1,8 +0,0 @@
package bigcache
// Hasher is responsible for generating an unsigned 64-bit hash of a provided string. Hasher should minimize collisions
// (generating the same hash for different strings), and while performance is also important, fast functions are preferable (e.g.
// the FarmHash family).
type Hasher interface {
Sum64(string) uint64
}

@ -1,7 +0,0 @@
package bigcache
type hashStub uint64
func (stub hashStub) Sum64(_ string) uint64 {
return uint64(stub)
}

@ -1,122 +0,0 @@
package bigcache
import "sync"
type iteratorError string
func (e iteratorError) Error() string {
return string(e)
}
// ErrInvalidIteratorState is reported when the iterator is in an invalid state
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
// ErrCannotRetrieveEntry is reported when an entry cannot be retrieved from the underlying cache
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
var emptyEntryInfo = EntryInfo{}
// EntryInfo holds information about an entry in the cache
type EntryInfo struct {
timestamp uint64
hash uint64
key string
value []byte
}
// Key returns entry's underlying key
func (e EntryInfo) Key() string {
return e.key
}
// Hash returns entry's hash value
func (e EntryInfo) Hash() uint64 {
return e.hash
}
// Timestamp returns entry's timestamp (time of insertion)
func (e EntryInfo) Timestamp() uint64 {
return e.timestamp
}
// Value returns entry's underlying value
func (e EntryInfo) Value() []byte {
return e.value
}
// EntryInfoIterator allows iterating over entries in the cache
type EntryInfoIterator struct {
mutex sync.Mutex
cache *BigCache
currentShard int
currentIndex int
elements []uint32
elementsCount int
valid bool
}
// SetNext moves to the next element and returns true if one exists.
func (it *EntryInfoIterator) SetNext() bool {
it.mutex.Lock()
it.valid = false
it.currentIndex++
if it.elementsCount > it.currentIndex {
it.valid = true
it.mutex.Unlock()
return true
}
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
// Non empty shard - stick with it
if it.elementsCount > 0 {
it.currentIndex = 0
it.currentShard = i
it.valid = true
it.mutex.Unlock()
return true
}
}
it.mutex.Unlock()
return false
}
func newIterator(cache *BigCache) *EntryInfoIterator {
elements, count := cache.shards[0].copyKeys()
return &EntryInfoIterator{
cache: cache,
currentShard: 0,
currentIndex: -1,
elements: elements,
elementsCount: count,
}
}
// Value returns the current value from the iterator
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
it.mutex.Lock()
if !it.valid {
it.mutex.Unlock()
return emptyEntryInfo, ErrInvalidIteratorState
}
entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
if err != nil {
it.mutex.Unlock()
return emptyEntryInfo, ErrCannotRetrieveEntry
}
it.mutex.Unlock()
return EntryInfo{
timestamp: readTimestampFromEntry(entry),
hash: readHashFromEntry(entry),
key: readKeyFromEntry(entry),
value: readEntry(entry),
}, nil
}
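A minimal sketch of consuming this iterator from client code, given a *BigCache named `cache` and the `fmt` import:
```go
it := cache.Iterator()
for it.SetNext() {
	info, err := it.Value()
	if err != nil {
		// The entry may have been evicted concurrently (ErrCannotRetrieveEntry).
		continue
	}
	fmt.Printf("%s -> %d bytes\n", info.Key(), len(info.Value()))
}
```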

@ -1,182 +0,0 @@
package bigcache
import (
"fmt"
"runtime"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestEntriesIterator(t *testing.T) {
t.Parallel()
// given
keysCount := 1000
cache, _ := NewBigCache(Config{
Shards: 8,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
value := []byte("value")
for i := 0; i < keysCount; i++ {
cache.Set(fmt.Sprintf("key%d", i), value)
}
// when
keys := make(map[string]struct{})
iterator := cache.Iterator()
for iterator.SetNext() {
current, err := iterator.Value()
if err == nil {
keys[current.Key()] = struct{}{}
}
}
// then
assert.Equal(t, keysCount, len(keys))
}
func TestEntriesIteratorWithMostShardsEmpty(t *testing.T) {
t.Parallel()
// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(Config{
Shards: 8,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)
cache.Set("key", []byte("value"))
// when
iterator := cache.Iterator()
// then
if !iterator.SetNext() {
t.Errorf("Iterator should contain at least single element")
}
current, err := iterator.Value()
// then
assert.Nil(t, err)
assert.Equal(t, "key", current.Key())
assert.Equal(t, uint64(0x3dc94a19365b10ec), current.Hash())
assert.Equal(t, []byte("value"), current.Value())
assert.Equal(t, uint64(0), current.Timestamp())
}
func TestEntriesIteratorWithConcurrentUpdate(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
cache.Set("key", []byte("value"))
// when
iterator := cache.Iterator()
// then
if !iterator.SetNext() {
t.Errorf("Iterator should contain at least single element")
}
// Quite ugly but works
for i := 0; i < cache.config.Shards; i++ {
if oldestEntry, err := cache.shards[i].getOldestEntry(); err == nil {
cache.onEvict(oldestEntry, 10, cache.shards[i].removeOldestEntry)
}
}
current, err := iterator.Value()
// then
assert.Equal(t, ErrCannotRetrieveEntry, err)
assert.Equal(t, "Could not retrieve entry from cache", err.Error())
assert.Equal(t, EntryInfo{}, current)
}
func TestEntriesIteratorWithAllShardsEmpty(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
iterator := cache.Iterator()
// then
if iterator.SetNext() {
t.Errorf("Iterator should not contain any elements")
}
}
func TestEntriesIteratorInInvalidState(t *testing.T) {
t.Parallel()
// given
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
})
// when
iterator := cache.Iterator()
// then
_, err := iterator.Value()
assert.Equal(t, ErrInvalidIteratorState, err)
assert.Equal(t, "Iterator is in invalid state. Use SetNext() to move to next position", err.Error())
}
func TestEntriesIteratorParallelAdd(t *testing.T) {
bc, err := NewBigCache(DefaultConfig(1 * time.Minute))
if err != nil {
panic(err)
}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
for i := 0; i < 10000; i++ {
err := bc.Set(strconv.Itoa(i), []byte("aaaaaaa"))
if err != nil {
panic(err)
}
runtime.Gosched()
}
wg.Done()
}()
for i := 0; i < 100; i++ {
iter := bc.Iterator()
for iter.SetNext() {
_, _ = iter.Value()
}
}
wg.Wait()
}

@ -1,30 +0,0 @@
package bigcache
import (
"log"
"os"
)
// Logger is invoked when `Config.Verbose=true`
type Logger interface {
Printf(format string, v ...interface{})
}
// this is a safeguard, breaking at compile time in case
// `log.Logger` does not adhere to our `Logger` interface.
// see https://golang.org/doc/faq#guarantee_satisfies_interface
var _ Logger = &log.Logger{}
// DefaultLogger returns a `Logger` implementation
// backed by stdlib's log
func DefaultLogger() *log.Logger {
return log.New(os.Stdout, "", log.LstdFlags)
}
func newLogger(custom Logger) Logger {
if custom != nil {
return custom
}
return DefaultLogger()
}

@ -1,238 +0,0 @@
package queue
import (
"encoding/binary"
"log"
"time"
)
const (
// Number of bytes used to keep information about entry size
headerEntrySize = 4
// Bytes before the left margin are not used. A zero index means the element does not exist in the queue, which is useful when reading an entry by index
leftMarginIndex = 1
// Minimum empty blob size in bytes. An empty blob fills the space between tail and head during additional memory allocation.
// It keeps entry indexes unchanged
minimumEmptyBlobSize = 32 + headerEntrySize
)
var (
errEmptyQueue = &queueError{"Empty queue"}
errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."}
errIndexOutOfBounds = &queueError{"Index out of range"}
)
// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
// Every push operation returns the index of the entry; it can be used to read the entry later
type BytesQueue struct {
array []byte
capacity int
maxCapacity int
head int
tail int
count int
rightMargin int
headerBuffer []byte
verbose bool
initialCapacity int
}
type queueError struct {
message string
}
// NewBytesQueue initializes a new bytes queue.
// The initial capacity is used for the byte array allocation.
// When the verbose flag is set, information about memory allocations is printed
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
return &BytesQueue{
array: make([]byte, initialCapacity),
capacity: initialCapacity,
maxCapacity: maxCapacity,
headerBuffer: make([]byte, headerEntrySize),
tail: leftMarginIndex,
head: leftMarginIndex,
rightMargin: leftMarginIndex,
verbose: verbose,
initialCapacity: initialCapacity,
}
}
// Reset removes all entries from queue
func (q *BytesQueue) Reset() {
// Just reset indexes
q.tail = leftMarginIndex
q.head = leftMarginIndex
q.rightMargin = leftMarginIndex
q.count = 0
}
// Push copies the entry to the end of the queue and moves the tail pointer. It allocates more space if needed.
// It returns the index of the pushed data, or an error if the queue's maximum size limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
dataLen := len(data)
if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
q.tail = leftMarginIndex
} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
return -1, &queueError{"Full queue. Maximum size limit reached."}
} else {
q.allocateAdditionalMemory(dataLen + headerEntrySize)
}
}
index := q.tail
q.push(data, dataLen)
return index, nil
}
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
start := time.Now()
if q.capacity < minimum {
q.capacity += minimum
}
q.capacity = q.capacity * 2
if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
q.capacity = q.maxCapacity
}
oldArray := q.array
q.array = make([]byte, q.capacity)
if leftMarginIndex != q.rightMargin {
copy(q.array, oldArray[:q.rightMargin])
if q.tail < q.head {
emptyBlobLen := q.head - q.tail - headerEntrySize
q.push(make([]byte, emptyBlobLen), emptyBlobLen)
q.head = leftMarginIndex
q.tail = q.rightMargin
}
}
if q.verbose {
log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
}
}
func (q *BytesQueue) push(data []byte, len int) {
binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
q.copy(q.headerBuffer, headerEntrySize)
q.copy(data, len)
if q.tail > q.head {
q.rightMargin = q.tail
}
q.count++
}
func (q *BytesQueue) copy(data []byte, len int) {
q.tail += copy(q.array[q.tail:], data[:len])
}
// Pop reads the oldest entry from the queue and moves the head pointer to the next one
func (q *BytesQueue) Pop() ([]byte, error) {
data, size, err := q.peek(q.head)
if err != nil {
return nil, err
}
q.head += headerEntrySize + size
q.count--
if q.head == q.rightMargin {
q.head = leftMarginIndex
if q.tail == q.rightMargin {
q.tail = leftMarginIndex
}
q.rightMargin = q.tail
}
return data, nil
}
// Peek reads the oldest entry from the queue without moving the head pointer
func (q *BytesQueue) Peek() ([]byte, error) {
data, _, err := q.peek(q.head)
return data, err
}
// Get reads the entry at the given index
func (q *BytesQueue) Get(index int) ([]byte, error) {
data, _, err := q.peek(index)
return data, err
}
// CheckGet checks whether an entry can be read at the given index
func (q *BytesQueue) CheckGet(index int) error {
return q.peekCheckErr(index)
}
// Capacity returns the number of bytes allocated for the queue
func (q *BytesQueue) Capacity() int {
return q.capacity
}
// Len returns the number of entries kept in the queue
func (q *BytesQueue) Len() int {
return q.count
}
// Error returns error message
func (e *queueError) Error() string {
return e.message
}
// peekCheckErr is identical to peek, but does not actually return any data
func (q *BytesQueue) peekCheckErr(index int) error {
if q.count == 0 {
return errEmptyQueue
}
if index <= 0 {
return errInvalidIndex
}
if index+headerEntrySize >= len(q.array) {
return errIndexOutOfBounds
}
return nil
}
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
if q.count == 0 {
return nil, 0, errEmptyQueue
}
if index <= 0 {
return nil, 0, errInvalidIndex
}
if index+headerEntrySize >= len(q.array) {
return nil, 0, errIndexOutOfBounds
}
blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
}
func (q *BytesQueue) availableSpaceAfterTail() int {
if q.tail >= q.head {
return q.capacity - q.tail
}
return q.head - q.tail - minimumEmptyBlobSize
}
func (q *BytesQueue) availableSpaceBeforeHead() int {
if q.tail >= q.head {
return q.head - leftMarginIndex - minimumEmptyBlobSize
}
return q.head - q.tail - minimumEmptyBlobSize
}
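A short sketch of the queue's index-based access, along the lines of what the tests below exercise; it assumes the `queue`, `fmt`, and `log` imports:
```go
q := queue.NewBytesQueue(64, 0, false) // 64 bytes initially, no max capacity

index, err := q.Push([]byte("hello"))
if err != nil {
	log.Fatal(err)
}

entry, _ := q.Get(index) // read by index without moving the head
fmt.Printf("%s\n", entry) // prints: hello

oldest, _ := q.Pop() // FIFO removal moves the head
fmt.Printf("%s\n", oldest) // prints: hello
```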

@ -1,374 +0,0 @@
package queue
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPushAndPop(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(10, 0, true)
entry := []byte("hello")
// when
_, err := queue.Pop()
// then
assert.EqualError(t, err, "Empty queue")
// when
queue.Push(entry)
// then
assert.Equal(t, entry, pop(queue))
}
func TestLen(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
assert.Zero(t, queue.Len())
// when
queue.Push(entry)
// then
assert.Equal(t, queue.Len(), 1)
}
func TestPeek(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
// when
read, err := queue.Peek()
err2 := queue.peekCheckErr(queue.head)
// then
assert.Equal(t, err, err2)
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
// when
queue.Push(entry)
read, err = queue.Peek()
err2 = queue.peekCheckErr(queue.head)
// then
assert.Equal(t, err, err2)
assert.NoError(t, err)
assert.Equal(t, pop(queue), read)
assert.Equal(t, entry, read)
}
func TestReset(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
entry := []byte("hello")
// when
queue.Push(entry)
queue.Push(entry)
queue.Push(entry)
queue.Reset()
read, err := queue.Peek()
// then
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
// when
queue.Push(entry)
read, err = queue.Peek()
// then
assert.NoError(t, err)
assert.Equal(t, pop(queue), read)
assert.Equal(t, entry, read)
// when
read, err = queue.Peek()
// then
assert.EqualError(t, err, "Empty queue")
assert.Nil(t, read)
}
func TestReuseAvailableSpace(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70))
queue.Push(blob('b', 20))
queue.Pop()
queue.Push(blob('c', 20))
// then
assert.Equal(t, 100, queue.Capacity())
assert.Equal(t, blob('b', 20), pop(queue))
}
func TestAllocateAdditionalSpace(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(11, 0, false)
// when
queue.Push([]byte("hello1"))
queue.Push([]byte("hello2"))
// then
assert.Equal(t, 22, queue.Capacity())
}
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(25, 0, false)
// when
queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
queue.Push(blob('b', 6)) // additional 10 bytes
queue.Pop() // space freed, 7 bytes available at the beginning
queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
// then
assert.Equal(t, 50, queue.Capacity())
assert.Equal(t, blob('b', 6), pop(queue))
assert.Equal(t, blob('c', 6), pop(queue))
}
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(25, 0, false)
// when
queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
index, _ := queue.Push(blob('b', 6)) // additional 10 bytes
queue.Pop() // space freed, 7 bytes available at the beginning
newestIndex, _ := queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory
// then
assert.Equal(t, 50, queue.Capacity())
assert.Equal(t, blob('b', 6), get(queue, index))
assert.Equal(t, blob('c', 6), get(queue, newestIndex))
}
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
queue.Pop() // space freed at the beginning
queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
// then
assert.Equal(t, 200, queue.Capacity())
assert.Equal(t, blob('c', 30), pop(queue))
// empty blob fills space between tail and head,
// created when additional memory was allocated,
// it keeps current entries indexes unchanged
assert.Equal(t, blob(0, 36), pop(queue))
assert.Equal(t, blob('b', 10), pop(queue))
assert.Equal(t, blob('d', 40), pop(queue))
}
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(100, 0, false)
// when
queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
index, _ := queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
queue.Pop() // space freed at the beginning
queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
newestIndex, _ := queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory
// then
assert.Equal(t, 200, queue.Capacity())
assert.Equal(t, blob('b', 10), get(queue, index))
assert.Equal(t, blob('d', 40), get(queue, newestIndex))
}
func TestAllocateAdditionalSpaceForValueBiggerThanInitQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(11, 0, false)
// when
queue.Push(blob('a', 100))
// then
assert.Equal(t, blob('a', 100), pop(queue))
assert.Equal(t, 230, queue.Capacity())
}
func TestAllocateAdditionalSpaceForValueBiggerThanQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(21, 0, false)
// when
queue.Push(make([]byte, 2))
queue.Push(make([]byte, 2))
queue.Push(make([]byte, 100))
// then
queue.Pop()
queue.Pop()
assert.Equal(t, make([]byte, 100), pop(queue))
assert.Equal(t, 250, queue.Capacity())
}
func TestPopWholeQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(13, 0, false)
// when
queue.Push([]byte("a"))
queue.Push([]byte("b"))
queue.Pop()
queue.Pop()
queue.Push([]byte("c"))
// then
assert.Equal(t, 13, queue.Capacity())
assert.Equal(t, []byte("c"), pop(queue))
}
func TestGetEntryFromIndex(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(20, 0, false)
// when
queue.Push([]byte("a"))
index, _ := queue.Push([]byte("b"))
queue.Push([]byte("c"))
result, _ := queue.Get(index)
// then
assert.Equal(t, []byte("b"), result)
}
func TestGetEntryFromInvalidIndex(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(1, 0, false)
queue.Push([]byte("a"))
// when
result, err := queue.Get(0)
err2 := queue.CheckGet(0)
// then
assert.Equal(t, err, err2)
assert.Nil(t, result)
assert.EqualError(t, err, "Index must be greater than zero. Invalid index.")
}
func TestGetEntryFromIndexOutOfRange(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(1, 0, false)
queue.Push([]byte("a"))
// when
result, err := queue.Get(42)
err2 := queue.CheckGet(42)
// then
assert.Equal(t, err, err2)
assert.Nil(t, result)
assert.EqualError(t, err, "Index out of range")
}
func TestGetEntryFromEmptyQueue(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(13, 0, false)
// when
result, err := queue.Get(1)
err2 := queue.CheckGet(1)
// then
assert.Equal(t, err, err2)
assert.Nil(t, result)
assert.EqualError(t, err, "Empty queue")
}
func TestMaxSizeLimit(t *testing.T) {
t.Parallel()
// given
queue := NewBytesQueue(30, 50, false)
// when
queue.Push(blob('a', 25))
queue.Push(blob('b', 5))
capacity := queue.Capacity()
_, err := queue.Push(blob('c', 15))
// then
assert.Equal(t, 50, capacity)
assert.EqualError(t, err, "Full queue. Maximum size limit reached.")
assert.Equal(t, blob('a', 25), pop(queue))
assert.Equal(t, blob('b', 5), pop(queue))
}
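// pop removes and returns the next entry, panicking on error (test helper).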
func pop(queue *BytesQueue) []byte {
entry, err := queue.Pop()
if err != nil {
panic(err)
}
return entry
}
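// get returns the entry at the given index, panicking on error (test helper).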
func get(queue *BytesQueue, index int) []byte {
entry, err := queue.Get(index)
if err != nil {
panic(err)
}
return entry
}
// blob returns a slice of the given length filled with the given byte (test helper).
func blob(char byte, length int) []byte {
b := make([]byte, length)
for index := range b {
b[index] = char
}
return b
}

View File

@ -1,105 +0,0 @@
# BigCache HTTP Server
This is a basic HTTP server implementation for BigCache. It exposes a small RESTful API and is designed for easy operational deployment as a standalone executable, for platforms like Cloud Foundry and Heroku. A design goal is versatility: whether you want to cache pictures, software artifacts, text, or any other kind of binary data, the BigCache HTTP Server should fit your needs.
```bash
# cache API.
GET /api/v1/cache/{key}
PUT /api/v1/cache/{key}
DELETE /api/v1/cache/{key}
# stats API.
GET /api/v1/stats
```
The cache API is designed for ease-of-use caching and accepts any content type. The stats API returns hit and miss statistics for the cache since the last time the server was started; the counters reset whenever the server restarts.
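For example, a stats request might look like this (the field names match the JSON tags on `bigcache.Stats`; the values shown are illustrative):
```bash
$ curl localhost:9090/api/v1/stats
{"hits":1,"misses":0,"delete_hits":0,"delete_misses":0,"collisions":0}
```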
### Notes for Operators
1. No SSL support, currently.
1. No authentication, currently.
1. Stats from the stats API are not persistent.
1. The easiest way to clean the cache is to restart the process; it takes less than a second to initialise.
1. There is no replication or clustering.
### Command-line Interface
```powershell
PS C:\go\src\github.com\mxplusb\bigcache\server> .\server.exe -h
Usage of C:\go\src\github.com\mxplusb\bigcache\server\server.exe:
-lifetime duration
Lifetime of each cache object. (default 10m0s)
-logfile string
Location of the logfile.
-max int
Maximum amount of data in the cache in MB. (default 8192)
-maxInWindow int
Used only in initial memory allocation. (default 600000)
-maxShardEntrySize int
The maximum size of each object stored in a shard. Used only in initial memory allocation. (default 500)
-port int
The port to listen on. (default 9090)
-shards int
Number of shards for the cache. (default 1024)
-v Verbose logging.
-version
Print server version.
```
Example:
```bash
$ curl -v -XPUT localhost:9090/api/v1/cache/example -d "yay!"
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 9090 (#0)
> PUT /api/v1/cache/example HTTP/1.1
> Host: localhost:9090
> User-Agent: curl/7.47.0
> Accept: */*
> Content-Length: 4
> Content-Type: application/x-www-form-urlencoded
>
* upload completely sent off: 4 out of 4 bytes
< HTTP/1.1 201 Created
< Date: Fri, 17 Nov 2017 03:50:07 GMT
< Content-Length: 0
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
$
$ curl -v -XGET localhost:9090/api/v1/cache/example
Note: Unnecessary use of -X or --request, GET is already inferred.
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 9090 (#0)
> GET /api/v1/cache/example HTTP/1.1
> Host: localhost:9090
> User-Agent: curl/7.47.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Fri, 17 Nov 2017 03:50:23 GMT
< Content-Length: 4
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
yay!
```
The server does log basic metrics:
```bash
$ ./server
2017/11/16 22:49:22 cache initialised.
2017/11/16 22:49:22 starting server on :9090
2017/11/16 22:50:07 stored "example" in cache.
2017/11/16 22:50:07 request took 277000ns.
2017/11/16 22:50:23 request took 9000ns.
```
### Acquiring Natively
This is native Go with no external dependencies, so it will compile for all supported Go platforms. To build:
```bash
go build server.go
```

View File

@ -1,87 +0,0 @@
package main
import (
"io/ioutil"
"log"
"net/http"
"strings"
)
func cacheIndexHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
getCacheHandler(w, r)
case http.MethodPut:
putCacheHandler(w, r)
case http.MethodDelete:
deleteCacheHandler(w, r)
}
})
}
// handles get requests.
func getCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if target == "" {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("can't get a key if there is no key."))
log.Print("empty request.")
return
}
entry, err := cache.Get(target)
if err != nil {
errMsg := err.Error()
if strings.Contains(errMsg, "not found") {
log.Print(err)
w.WriteHeader(http.StatusNotFound)
return
}
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Write(entry)
}
func putCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if target == "" {
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte("can't put a key if there is no key."))
log.Print("empty request.")
return
}
entry, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := cache.Set(target, entry); err != nil {
log.Print(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
log.Printf("stored \"%s\" in cache.", target)
w.WriteHeader(http.StatusCreated)
}
// delete cache objects.
func deleteCacheHandler(w http.ResponseWriter, r *http.Request) {
target := r.URL.Path[len(cachePath):]
if err := cache.Delete(target); err != nil {
if strings.Contains(err.Error(), "not found") {
w.WriteHeader(http.StatusNotFound)
log.Printf("%s not found.", target)
return
}
// return here so we don't also write the 200 below.
w.WriteHeader(http.StatusInternalServerError)
log.Printf("internal cache error: %s", err)
return
}
// 200 OK is the expected response for a successful DELETE.
w.WriteHeader(http.StatusOK)
}

View File

@ -1,29 +0,0 @@
package main
import (
"log"
"net/http"
"time"
)
// our base middleware implementation.
type service func(http.Handler) http.Handler
// serviceLoader chains middleware services around a base handler.
func serviceLoader(h http.Handler, svcs ...service) http.Handler {
for _, svc := range svcs {
h = svc(h)
}
return h
}
// middleware for request length metrics.
func requestMetrics(l *log.Logger) service {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
h.ServeHTTP(w, r)
l.Printf("%s request to %s took %vns.", r.Method, r.URL.Path, time.Since(start).Nanoseconds())
})
}
}

View File

@ -1,47 +0,0 @@
package main
import (
"bytes"
"log"
"net/http"
"net/http/httptest"
"testing"
)
func emptyTestHandler() service {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
})
}
}
func TestServiceLoader(t *testing.T) {
req, err := http.NewRequest("GET", "/api/v1/stats", nil)
if err != nil {
t.Error(err)
}
rr := httptest.NewRecorder()
testHandlers := serviceLoader(cacheIndexHandler(), emptyTestHandler())
testHandlers.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusAccepted {
t.Errorf("handlers not loading properly. want: 202, got: %d", rr.Code)
}
}
func TestRequestMetrics(t *testing.T) {
var b bytes.Buffer
logger := log.New(&b, "", log.LstdFlags)
req, err := http.NewRequest("GET", "/api/v1/cache/empty", nil)
if err != nil {
t.Error(err)
}
rr := httptest.NewRecorder()
testHandlers := serviceLoader(cacheIndexHandler(), requestMetrics(logger))
testHandlers.ServeHTTP(rr, req)
targetTestString := b.String()
if len(targetTestString) == 0 {
t.Errorf("we are not logging request length strings.")
}
t.Log(targetTestString)
}

View File

@ -1,85 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/allegro/bigcache"
)
const (
// base HTTP paths.
apiVersion = "v1"
apiBasePath = "/api/" + apiVersion + "/"
// path to cache.
cachePath = apiBasePath + "cache/"
statsPath = apiBasePath + "stats"
// server version.
version = "1.0.0"
)
var (
port int
logfile string
ver bool
// cache-specific settings.
cache *bigcache.BigCache
config = bigcache.Config{}
)
func init() {
flag.BoolVar(&config.Verbose, "v", false, "Verbose logging.")
flag.IntVar(&config.Shards, "shards", 1024, "Number of shards for the cache.")
flag.IntVar(&config.MaxEntriesInWindow, "maxInWindow", 1000*10*60, "Used only in initial memory allocation.")
flag.DurationVar(&config.LifeWindow, "lifetime", 100000*100000*60, "Lifetime of each cache object.") // 100000*100000*60ns == 10 minutes
flag.IntVar(&config.HardMaxCacheSize, "max", 8192, "Maximum amount of data in the cache in MB.")
flag.IntVar(&config.MaxEntrySize, "maxShardEntrySize", 500, "The maximum size of each object stored in a shard. Used only in initial memory allocation.")
flag.IntVar(&port, "port", 9090, "The port to listen on.")
flag.StringVar(&logfile, "logfile", "", "Location of the logfile.")
flag.BoolVar(&ver, "version", false, "Print server version.")
}
func main() {
flag.Parse()
if ver {
fmt.Printf("BigCache HTTP Server v%s", version)
os.Exit(0)
}
var logger *log.Logger
if logfile == "" {
logger = log.New(os.Stdout, "", log.LstdFlags)
} else {
f, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
logger = log.New(f, "", log.LstdFlags)
}
var err error
cache, err = bigcache.NewBigCache(config)
if err != nil {
logger.Fatal(err)
}
logger.Print("cache initialised.")
// let the middleware log.
http.Handle(cachePath, serviceLoader(cacheIndexHandler(), requestMetrics(logger)))
http.Handle(statsPath, serviceLoader(statsIndexHandler(), requestMetrics(logger)))
logger.Printf("starting server on :%d", port)
strPort := ":" + strconv.Itoa(port)
log.Fatal("ListenAndServe: ", http.ListenAndServe(strPort, nil))
}

View File

@ -1,283 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"errors"
"io/ioutil"
"net/http/httptest"
"testing"
"time"
"github.com/allegro/bigcache"
)
const (
testBaseString = "http://bigcache.org"
)
func testCacheSetup() {
cache, _ = bigcache.NewBigCache(bigcache.Config{
Shards: 1024,
LifeWindow: 10 * time.Minute,
MaxEntriesInWindow: 1000 * 10 * 60,
MaxEntrySize: 500,
Verbose: true,
HardMaxCacheSize: 8192,
OnRemove: nil,
})
}
func TestMain(m *testing.M) {
testCacheSetup()
m.Run()
}
func TestGetWithNoKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/", nil)
rr := httptest.NewRecorder()
getCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 400 {
t.Errorf("want: 400; got: %d", resp.StatusCode)
}
}
func TestGetWithMissingKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/doesNotExist", nil)
rr := httptest.NewRecorder()
getCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d", resp.StatusCode)
}
}
func TestGetKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/getKey", nil)
rr := httptest.NewRecorder()
// set something.
cache.Set("getKey", []byte("123"))
getCacheHandler(rr, req)
resp := rr.Result()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("cannot deserialise test response: %s", err)
}
if string(body) != "123" {
t.Errorf("want: 123; got: %s.\n\tcan't get existing key getKey.", string(body))
}
}
func TestPutKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
testPutKeyResult, err := cache.Get("putKey")
if err != nil {
t.Errorf("error returning cache entry: %s", err)
}
if string(testPutKeyResult) != "123" {
t.Errorf("want: 123; got: %s.\n\tcan't get PUT key putKey.", string(testPutKeyResult))
}
}
func TestPutEmptyKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 400 {
t.Errorf("want: 400; got: %d.\n\tempty key insertion should return with 400", resp.StatusCode)
}
}
func TestDeleteEmptyKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete empty keys.", resp.StatusCode)
}
}
func TestDeleteInvalidKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/invalidDeleteKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 404 {
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete invalid keys.", resp.StatusCode)
}
}
func TestDeleteKey(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testDeleteKey", bytes.NewBuffer([]byte("123")))
rr := httptest.NewRecorder()
if err := cache.Set("testDeleteKey", []byte("123")); err != nil {
t.Errorf("can't set key for testing. %s", err)
}
deleteCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 200 {
t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
}
}
func TestGetStats(t *testing.T) {
t.Parallel()
var testStats bigcache.Stats
req := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
rr := httptest.NewRecorder()
// manually enter a key so there are some stats. get it so there's at least 1 hit.
if err := cache.Set("incrementStats", []byte("123")); err != nil {
t.Errorf("error setting cache value. error %s", err)
}
// read the key back so the hit counter increments; fail if it can't be found.
if _, err := cache.Get("incrementStats"); err != nil {
t.Errorf("can't find incrementStats. error: %s", err)
}
getCacheStatsHandler(rr, req)
resp := rr.Result()
if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
t.Errorf("error decoding cache stats. error: %s", err)
}
if testStats.Hits == 0 {
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
}
}
func TestGetStatsIndex(t *testing.T) {
t.Parallel()
var testStats bigcache.Stats
getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/stats", nil)
rr := httptest.NewRecorder()
// manually enter a key so there are some stats. get it so there's at least 1 hit.
if err := cache.Set("incrementStats", []byte("123")); err != nil {
t.Errorf("error setting cache value. error %s", err)
}
// read the key back so the hit counter increments; fail if it can't be found.
if _, err := cache.Get("incrementStats"); err != nil {
t.Errorf("can't find incrementStats. error: %s", err)
}
testHandlers := statsIndexHandler()
testHandlers.ServeHTTP(rr, getreq)
resp := rr.Result()
if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
t.Errorf("error decoding cache stats. error: %s", err)
}
if testStats.Hits == 0 {
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
}
testHandlers = statsIndexHandler()
testHandlers.ServeHTTP(rr, putreq)
resp = rr.Result()
_, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("cannot deserialise test response: %s", err)
}
}
func TestCacheIndexHandler(t *testing.T) {
getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/testkey", nil)
putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123")))
delreq := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123")))
getrr := httptest.NewRecorder()
putrr := httptest.NewRecorder()
delrr := httptest.NewRecorder()
testHandlers := cacheIndexHandler()
testHandlers.ServeHTTP(putrr, putreq)
resp := putrr.Result()
if resp.StatusCode != 201 {
t.Errorf("want: 201; got: %d.\n\tcan't put keys.", resp.StatusCode)
}
testHandlers.ServeHTTP(getrr, getreq)
resp = getrr.Result()
if resp.StatusCode != 200 {
t.Errorf("want: 200; got: %d.\n\tcan't get keys.", resp.StatusCode)
}
testHandlers.ServeHTTP(delrr, delreq)
resp = delrr.Result()
if resp.StatusCode != 200 {
t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
}
}
func TestInvalidPutWhenExceedShardCap(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer(bytes.Repeat([]byte("a"), 8*1024*1024)))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 500 {
t.Errorf("want: 500; got: %d", resp.StatusCode)
}
}
func TestInvalidPutWhenReading(t *testing.T) {
t.Parallel()
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", errReader(0))
rr := httptest.NewRecorder()
putCacheHandler(rr, req)
resp := rr.Result()
if resp.StatusCode != 500 {
t.Errorf("want: 500; got: %d", resp.StatusCode)
}
}
type errReader int
func (errReader) Read([]byte) (int, error) {
return 0, errors.New("test read error")
}

View File

@ -1,33 +0,0 @@
package main
import (
"encoding/json"
"log"
"net/http"
)
// index for stats handle
func statsIndexHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet:
getCacheStatsHandler(w, r)
default:
w.WriteHeader(http.StatusMethodNotAllowed)
}
})
}
// returns the cache's statistics.
func getCacheStatsHandler(w http.ResponseWriter, r *http.Request) {
target, err := json.Marshal(cache.Stats())
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Printf("cannot marshal cache stats. error: %s", err)
return
}
// since we're sending a struct, set the content type so consumers can parse it easily.
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Write(target)
}

View File

@ -1,259 +0,0 @@
package bigcache
import (
"fmt"
"sync"
"sync/atomic"
"github.com/allegro/bigcache/queue"
)
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
type cacheShard struct {
hashmap map[uint64]uint32
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
onRemove onRemoveCallback
isVerbose bool
logger Logger
clock clock
lifeWindow uint64
stats Stats
}
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.miss()
return nil, ErrEntryNotFound
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.miss()
return nil, err
}
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}
s.lock.RUnlock()
s.collision()
return nil, ErrEntryNotFound
}
entry := readEntry(wrappedEntry)
s.lock.RUnlock()
s.hit()
return entry, nil
}
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
currentTimestamp := uint64(s.clock.epoch())
s.lock.Lock()
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
resetKeyFromEntry(previousEntry)
}
}
if oldestEntry, err := s.entries.Peek(); err == nil {
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
}
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.lock.Unlock()
return nil
}
if s.removeOldestEntry(NoSpace) != nil {
s.lock.Unlock()
return fmt.Errorf("entry is bigger than max shard size")
}
}
}
func (s *cacheShard) del(key string, hashedKey uint64) error {
// Optimistic pre-check using only a read lock
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.delmiss()
return ErrEntryNotFound
}
if err := s.entries.CheckGet(int(itemIndex)); err != nil {
s.lock.RUnlock()
s.delmiss()
return err
}
s.lock.RUnlock()
s.lock.Lock()
{
// After obtaining the write lock, we need to re-read the index,
// since the data read earlier may be stale by now
itemIndex = s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.Unlock()
s.delmiss()
return ErrEntryNotFound
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.Unlock()
s.delmiss()
return err
}
delete(s.hashmap, hashedKey)
s.onRemove(wrappedEntry, Deleted)
resetKeyFromEntry(wrappedEntry)
}
s.lock.Unlock()
s.delhit()
return nil
}
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > s.lifeWindow {
evict(Expired)
return true
}
return false
}
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
s.lock.Lock()
for {
if oldestEntry, err := s.entries.Peek(); err != nil {
break
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
break
}
}
s.lock.Unlock()
}
func (s *cacheShard) getOldestEntry() ([]byte, error) {
s.lock.RLock()
defer s.lock.RUnlock()
return s.entries.Peek()
}
func (s *cacheShard) getEntry(index int) ([]byte, error) {
s.lock.RLock()
entry, err := s.entries.Get(index)
s.lock.RUnlock()
return entry, err
}
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
s.lock.RLock()
keys = make([]uint32, len(s.hashmap))
for _, index := range s.hashmap {
keys[next] = index
next++
}
s.lock.RUnlock()
return keys, next
}
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
oldest, err := s.entries.Pop()
if err == nil {
hash := readHashFromEntry(oldest)
delete(s.hashmap, hash)
s.onRemove(oldest, reason)
return nil
}
return err
}
func (s *cacheShard) reset(config Config) {
s.lock.Lock()
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
s.entries.Reset()
s.lock.Unlock()
}
func (s *cacheShard) len() int {
s.lock.RLock()
res := len(s.hashmap)
s.lock.RUnlock()
return res
}
func (s *cacheShard) capacity() int {
s.lock.RLock()
res := s.entries.Capacity()
s.lock.RUnlock()
return res
}
func (s *cacheShard) getStats() Stats {
var stats = Stats{
Hits: atomic.LoadInt64(&s.stats.Hits),
Misses: atomic.LoadInt64(&s.stats.Misses),
DelHits: atomic.LoadInt64(&s.stats.DelHits),
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
Collisions: atomic.LoadInt64(&s.stats.Collisions),
}
return stats
}
func (s *cacheShard) hit() {
atomic.AddInt64(&s.stats.Hits, 1)
}
func (s *cacheShard) miss() {
atomic.AddInt64(&s.stats.Misses, 1)
}
func (s *cacheShard) delhit() {
atomic.AddInt64(&s.stats.DelHits, 1)
}
func (s *cacheShard) delmiss() {
atomic.AddInt64(&s.stats.DelMisses, 1)
}
func (s *cacheShard) collision() {
atomic.AddInt64(&s.stats.Collisions, 1)
}
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
return &cacheShard{
hashmap: make(map[uint64]uint32, config.initialShardSize()),
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
onRemove: callback,
isVerbose: config.Verbose,
logger: newLogger(config.Logger),
clock: clock,
lifeWindow: uint64(config.LifeWindow.Seconds()),
}
}

View File

@ -1,15 +0,0 @@
package bigcache
// Stats stores cache statistics
type Stats struct {
// Hits is the number of successfully found keys
Hits int64 `json:"hits"`
// Misses is the number of not found keys
Misses int64 `json:"misses"`
// DelHits is the number of successfully deleted keys
DelHits int64 `json:"delete_hits"`
// DelMisses is the number of keys that could not be deleted
DelMisses int64 `json:"delete_misses"`
// Collisions is the number of key collisions that have occurred
Collisions int64 `json:"collisions"`
}
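// For illustration, marshaling a Stats value with encoding/json produces
// output shaped like this (the values are examples only):
//   {"hits":42,"misses":7,"delete_hits":3,"delete_misses":1,"collisions":0}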

View File

@ -1,16 +0,0 @@
package bigcache
// max returns the larger of a and b.
func max(a, b int) int {
if a > b {
return a
}
return b
}
// convertMBToBytes converts a size in megabytes to bytes.
func convertMBToBytes(value int) int {
return value * 1024 * 1024
}
// isPowerOfTwo reports whether number is a power of two (note: it also returns true for 0).
func isPowerOfTwo(number int) bool {
return (number & (number - 1)) == 0
}

View File

@ -1,17 +0,0 @@
language: go
go:
- 1.11.x
- 1.x
- master
before_install:
- go get -v golang.org/x/lint/golint
- go get -v -t -d ./...
after_success:
- make coverdata
- bash <(curl -s https://codecov.io/bash)
script:
- make -j4 check GOTEST_FLAGS=-v
notifications:
slack:
secure: MO/3LqbyALbi9vAY3pZetp/LfRuKEPAYEUya7XKmTWA3OFHYkTGqJWNosVkFJd6eSKwnc3HP4jlKADEBNVxADHzcA3uMPUQi1mIcNk/Ps1WWMNDv1liE2XOoOmHSHZ/8ksk6TNq83x+d17ZffYq8KAH6iKNKvllO1JzQPgJJdf+cNXQQlg6uPSe+ggMpjqVLkKcHqA4L3/BWo6fNcyvkqaN3uXcEzYPi7Nb2q9tl0ja6ToyZV4H6SinwitZmpedN3RkBcm4fKmGyw5ikzH93ycA5SvWrnXTh1dJvq6DU0FV7iwI6oqPTbAUc3FE5g7aEkK0qVR21s2j+KNaOLnuX10ZGQFwj2r3SW2REHq4j+qqFla/2EmSFZJt3GXYS+plmGCxqCgyjSw6tTi7LaGZ/mWBJEA9/EaXG1NkwlQYx5tdUMeGj77OczjXClynpb2hJ7MM2b32Rnp0JmNaXAh01SmClo+8nDWuksAsIdPtWsbF0/XHmEJiqpu8ojvVXOQIbPt43bjG7PS1t5jaRAU/N1n56SiCGgCSGd3Ui5eX5vmgWdpZMl8NG05G4LFsgmkdphRT5fru0C2PrhNZYRDGWs63XKapBxsvfqGzdHxTtYuaDjHjrI+9w0BC/8kEzSWoPmabQ5ci4wf4DeplcIay4tDMgMSo8pGAf52vrne4rmUo=
on_success: change

View File

@ -1,25 +0,0 @@
All contributors are required to sign a "Contributor License Agreement" at
<TBD>
The following organizations and people have contributed code to this library.
(Please keep both lists sorted alphabetically.)
Arista Networks, Inc.
Benoit Sigoure
Fabrice Rabaute
The list of individual contributors for code currently in HEAD can be obtained
at any time with the following script:
find . -type f \
| while read i; do \
git blame -t $i 2>/dev/null; \
done \
| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
| awk '{a[$0]++; t++} END{for(n in a) print n}' \
| sort

View File

@ -1,177 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -1,14 +0,0 @@
# Copyright (c) 2016 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
# TODO: move this to cmd/ockafka (https://github.com/docker/hub-feedback/issues/292)
FROM golang:1.10.3
RUN mkdir -p /go/src/github.com/aristanetworks/goarista/cmd
WORKDIR /go/src/github.com/aristanetworks/goarista
COPY ./ .
RUN go get -d ./cmd/ockafka/... \
&& go install ./cmd/ockafka
ENTRYPOINT ["/go/bin/ockafka"]

View File

@ -1,59 +0,0 @@
# Copyright (c) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
GO := go
TEST_TIMEOUT := 30s
GOTEST_FLAGS :=
DEFAULT_GOPATH := $${GOPATH%%:*}
GOPATH_BIN := $(DEFAULT_GOPATH)/bin
GOPATH_PKG := $(DEFAULT_GOPATH)/pkg
GOLINT := $(GOPATH_BIN)/golint
GOFOLDERS := find . -type d ! -path "./.git/*"
all: install
install:
$(GO) install ./...
check: vet test fmtcheck lint
COVER_PKGS := key test
COVER_MODE := count
coverdata:
echo 'mode: $(COVER_MODE)' >coverage.out
for dir in $(COVER_PKGS); do \
$(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t ./$$dir || exit; \
tail -n +2 cov.out-t >> coverage.out && \
rm cov.out-t; \
done;
coverage: coverdata
$(GO) tool cover -html=coverage.out
rm -f coverage.out
fmtcheck:
errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi
find . -name '*.go' ! -name '*.pb.go' -exec ./check_line_len.awk {} +
./check_copyright_notice.sh
vet:
$(GO) vet ./...
lint:
lint=`$(GOFOLDERS) | xargs -L 1 $(GOLINT) | fgrep -v .pb.go`; if test -n "$$lint"; then echo "$$lint"; exit 1; fi
# The above is ugly, but unfortunately golint doesn't exit 1 when it finds
# lint. See https://github.com/golang/lint/issues/65
test:
$(GO) test $(GOTEST_FLAGS) -timeout=$(TEST_TIMEOUT) ./...
docker:
docker build -f cmd/occlient/Dockerfile .
clean:
rm -rf $(GOPATH_PKG)/*/github.com/aristanetworks/goarista
$(GO) clean ./...
.PHONY: all check coverage coverdata docker fmtcheck install lint test vet

View File

@ -1,70 +0,0 @@
# Arista Go library [![Build Status](https://travis-ci.org/aristanetworks/goarista.svg?branch=master)](https://travis-ci.org/aristanetworks/goarista) [![codecov.io](http://codecov.io/github/aristanetworks/goarista/coverage.svg?branch=master)](http://codecov.io/github/aristanetworks/goarista?branch=master) [![GoDoc](https://godoc.org/github.com/aristanetworks/goarista?status.png)](https://godoc.org/github.com/aristanetworks/goarista) [![Go Report Card](https://goreportcard.com/badge/github.com/aristanetworks/goarista)](https://goreportcard.com/report/github.com/aristanetworks/goarista)
## areflect
Helper functions to work with the `reflect` package. Contains
`ForceExport()`, which bypasses the check in `reflect.Value` that
prevents accessing unexported attributes.
## monotime
Provides access to a fast monotonic clock source, to fill in the gap in the
[Go standard library, which lacks one](https://github.com/golang/go/issues/12914).
Don't use `time.Now()` in code that needs to time things or that otherwise
assumes time passes at a constant rate; use `monotime.Now()` instead.
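A minimal sketch of timing a block of code with it, assuming `monotime.Now()` returns a `uint64` nanosecond timestamp (check the package source for the exact signature):
```go
package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/monotime"
)

func main() {
	start := monotime.Now() // assumed: uint64 nanoseconds from a monotonic source
	// ... the work being timed ...
	elapsed := monotime.Now() - start
	fmt.Printf("took %dns\n", elapsed)
}
```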
## cmd
See the [cmd](cmd) directory.
## dscp
Provides `ListenTCPWithTOS()`, which is a replacement for `net.ListenTCP()`
that allows specifying the ToS (Type of Service), to specify DSCP / ECN /
class of service flags to use for incoming connections. Requires `go1.9`.
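A hedged sketch of creating such a listener; the signature `ListenTCPWithTOS(*net.TCPAddr, byte)` is assumed here for illustration, not confirmed by this README:
```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	addr := &net.TCPAddr{IP: net.IPv4zero, Port: 6042}
	// 0xB8 encodes the DSCP EF code point in the ToS byte (assumed signature).
	ln, err := dscp.ListenTCPWithTOS(addr, 0xB8)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
}
```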
## key
Provides common types used across various Arista projects. The type `key.Key`
is used to work around the fact that Go can't let one use a non-hashable type
as a key to a `map`, and we sometimes need to use a `map[string]interface{}`
(or something containing one) as a key to maps. As a result, we frequently use
`map[key.Key]interface{}` instead of just `map[interface{}]interface{}` when we
need a generic key-value collection. The type `key.Path` is the representation
of a path broken down into individual elements, where each element is a `key.Key`.
The type `key.Pointer` represents a pointer to a `key.Path`.
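A small sketch of the pattern described above; the `key.New()` constructor is an assumption made for illustration:
```go
package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/key"
)

func main() {
	// Wrap a non-hashable composite value so it can serve as a map key.
	attrs := map[string]interface{}{"intf": "Ethernet1", "vlan": 42}
	k := key.New(attrs) // assumed constructor; see the package docs
	counters := map[key.Key]interface{}{k: int64(128)}
	fmt.Println(counters[k])
}
```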
## path
Provides functions that can be used to manipulate `key.Path` objects. The type
`path.Map` may be used for mapping paths to values. It allows for some fuzzy
matching for paths containing `path.Wildcard` keys.
## lanz
A client for [LANZ](https://eos.arista.com/latency-analyzer-lanz-architectures-and-configuration/)
streaming servers. It connects to a LANZ streaming server,
listens for notifications, decodes them and sends the LANZ protobuf on the
provided channel.
## monitor
A library to help expose monitoring metrics on top of the
[`expvar`](https://golang.org/pkg/expvar/) infrastructure.
## netns
`netns.Do(namespace, cb)` provides a handy mechanism to execute the given
callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/).
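A hedged sketch, assuming the callback type is `func() error`:
```go
package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/netns"
)

func main() {
	// Dial from inside the "ns-mgmt" network namespace (namespace name is illustrative).
	err := netns.Do("ns-mgmt", func() error {
		conn, err := net.Dial("tcp", "192.0.2.1:22")
		if err != nil {
			return err
		}
		return conn.Close()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```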
## influxlib
This is an InfluxDB client library that provides convenient methods for connecting to,
writing to, and reading from the service.
## test
This is a [Go](http://golang.org/) library to help in writing unit tests.
## Examples
TBD

View File

@ -1,38 +0,0 @@
// Copyright (c) 2014 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// Package areflect provides utilities to help with reflection.
package areflect
import (
"reflect"
"unsafe"
)
// ForceExport returns a new reflect.Value that is identical to the one passed
// in argument except that it's considered as an exported symbol even if in
// reality it isn't.
//
// The `reflect' package intentionally makes it impossible to access the value
// of an unexported attribute. The implementation of reflect.DeepEqual() cheats
// as it bypasses this check. Unfortunately, we can't use the same cheat, which
// prevents us from re-implementing DeepEqual properly or implementing some other
// reflection-based tools. So this is our cheat on top of theirs. It makes
// the given reflect.Value appear as if it was exported.
//
// This function requires go1.6 or newer.
func ForceExport(v reflect.Value) reflect.Value {
// constants from reflect/value.go
const flagStickyRO uintptr = 1 << 5
const flagEmbedRO uintptr = 1 << 6 // new in go1.6 (was flagIndir before)
const flagRO uintptr = flagStickyRO | flagEmbedRO
ptr := unsafe.Pointer(&v)
rv := (*struct {
typ unsafe.Pointer // a *reflect.rtype (reflect.Type)
ptr unsafe.Pointer // The value wrapped by this reflect.Value
flag uintptr
})(ptr)
rv.flag &= ^flagRO // Unset the flag so this value appears to be exported.
return v
}

View File

@ -1,36 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package areflect
import (
"reflect"
"testing"
)
type embedme struct {
}
type somestruct struct {
a uint32
embedme
}
func TestForcePublic(t *testing.T) {
c := somestruct{a: 42}
v := reflect.ValueOf(c)
// Without the call to forceExport(), the following line would crash with
// "panic: reflect.Value.Interface: cannot return value obtained from
// unexported field or method".
a := ForceExport(v.FieldByName("a")).Interface()
if i, ok := a.(uint32); !ok {
t.Fatalf("Should have gotten a uint32 but got a %T", a)
} else if i != 42 {
t.Fatalf("Should have gotten 42 but got a %d", i)
}
e := ForceExport(v.FieldByName("embedme")).Interface()
if _, ok := e.(embedme); !ok {
t.Fatalf("Should have gotten a embedme but got a %T", e)
}
}

View File

@ -1,19 +0,0 @@
#!/bin/sh
# Copyright (c) 2017 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
# egrep that comes with our Linux distro doesn't like \d, so use [0-9]
notice='Copyright \(c\) 20[0-9][0-9] Arista Networks, Inc.'
files=`git diff-tree --no-commit-id --name-only --diff-filter=ACMR -r HEAD | \
egrep '\.(go|proto|py|sh)$' | grep -v '\.pb\.go$'`
status=0
for file in $files; do
if ! egrep -q "$notice" $file; then
echo "$file: missing or incorrect copyright notice"
status=1
fi
done
exit $status

View File

@ -1,25 +0,0 @@
#!/usr/bin/awk -f
# Copyright (c) 2015 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the COPYING file.
BEGIN {
max = 100;
}
# Expand tabs to 4 spaces.
{
gsub(/\t/, " ");
}
length() > max {
errors++;
print FILENAME ":" FNR ": Line too long (" length() "/" max ")";
}
END {
if (errors >= 125) {
errors = 125;
}
exit errors;
}

View File

@ -1,16 +0,0 @@
# OpenConfig clients
The `oc*` commands are clients for the [OpenConfig](http://openconfig.net) gRPC interface.
# importsort
`importsort` is a utility for sorting and sectioning import blocks in go code.
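For example, given the section prefix `cvshub.com/company` (via the `-s` flag), an import block is rewritten so that standard library imports come first, the user-specified sections follow, and everything else comes last:
```go
// before: importsort -s cvshub.com/company -w file.go
import (
	"cvshub.com/company/lib"
	"fmt"
	"github.com/pkg/errors"
	"os"
)

// after
import (
	"fmt"
	"os"

	"cvshub.com/company/lib"

	"github.com/pkg/errors"
)
```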
# Running
After installing [Go](https://golang.org/dl/) and setting the [GOPATH](https://golang.org/doc/code.html#GOPATH) environment variable to the path to your workspace, you can just run:
```
go get github.com/aristanetworks/goarista/cmd/<cmd>
$GOPATH/bin/<cmd>
```

View File

@ -1,202 +0,0 @@
# gnmi
`gnmi` is a command-line client for interacting with a
[gNMI service](https://github.com/openconfig/reference/tree/master/rpc/gnmi).
# Installation
After installing [Go](https://golang.org/dl/) run:
```
go get github.com/aristanetworks/goarista/cmd/gnmi
```
This will install the `gnmi` binary in the `bin` directory
under [GOPATH](https://golang.org/doc/code.html#GOPATH).
# Usage
```
$ gnmi [OPTIONS] [OPERATION]
```
When running on the switch in a non-default VRF:
```
$ ip netns exec ns-<VRF> gnmi [OPTIONS] [OPERATION]
```
## Options
* `-addr [<VRF-NAME>/]ADDR:PORT`
Address of the gNMI endpoint (REQUIRED) with VRF name (OPTIONAL)
* `-username USERNAME`
Username to authenticate with
* `-password PASSWORD`
Password to authenticate with
* `-tls`
Enable TLS
* `-cafile PATH`
Path to server TLS certificate file
* `-certfile PATH`
Path to client TLS certificate file
* `-keyfile PATH`
Path to client TLS private key file
## Operations
`gnmi` supports the following operations: `capabilities`, `get`,
`subscribe`, `update`, `replace`, and `delete`.
### capabilities
`capabilities` prints the result of calling the
[Capabilities gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery).
Example:
```
$ gnmi [OPTIONS] capabilities
```
### get
`get` requires a path and calls the
[Get gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths).
Example:
Get all configuration in the default network instance:
```
$ gnmi [OPTIONS] get '/network-instances/network-instance[name=default]'
```
### subscribe
`subscribe` requires a path and calls the
[Subscribe gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35-subscribing-to-telemetry-updates).
This command will continuously print out results until signalled to
exit, for example by typing `Ctrl-C`.
Example:
Subscribe to interface counters:
```
$ gnmi [OPTIONS] subscribe '/interfaces/interface[name=*]/state/counters'
```
### update/replace/delete
`update`, `replace`, and `delete` are used to
[modify the configuration of a gNMI endpoint](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state).
All of these operations take a path that must specify a single node
element. In other words all list members must be fully-specified.
`delete` takes a path and will delete that path.
Example:
Delete BGP configuration in the default network instance:
```
$ gnmi [OPTIONS] delete '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/'
```
`update` and `replace` both take a path and a value in JSON
format. The JSON data may be provided in a file. See
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#344-modes-of-update-replace-versus-update)
for documentation on the differences between `update` and `replace`.
Examples:
Disable interface Ethernet3/42:
```
gnmi [OPTIONS] update '/interfaces/interface[name=Ethernet3/42]/config/enabled' 'false'
```
Replace the BGP global configuration:
```
gnmi [OPTIONS] replace '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/bgp/global' '{"config":{"as": 1234, "router-id": "1.2.3.4"}}'
```
Note: String values need to be quoted if they look like JSON. For example, setting the login banner to `tor[13]`:
```
gnmi [OPTIONS] update '/system/config/login-banner' '"tor[13]"'
```
#### JSON in a file
The value argument to `update` and `replace` may be a file. The
content of the file is used to make the request.
Example:
File `path/to/subintf100.json` contains the following:
```
{
"subinterface": [
{
"config": {
"enabled": true,
"index": 100
},
"index": 100
}
]
}
```
Add subinterface 100 to interfaces Ethernet4/1/1 and Ethernet4/2/1 in
one transaction:
```
gnmi [OPTIONS] update '/interfaces/interface[name=Ethernet4/1/1]/subinterfaces' path/to/subintf100.json \
update '/interfaces/interface[name=Ethernet4/2/1]/subinterfaces' path/to/subintf100.json
```
### CLI requests
`gnmi` offers the ability to send CLI text inside an `update` or
`replace` operation. This is achieved by doing an `update` or
`replace` and specifying `"origin=cli"` along with an empty path and a set of configure-mode
CLI commands separated by `\n`.
Example:
Configure the idle-timeout on SSH connections
```
gnmi [OPTIONS] update 'origin=cli' "" 'management ssh
idle-timeout 300'
```
### P4 Config
`gnmi` offers the ability to send p4 config files inside a `replace` operation.
This is achieved by doing a `replace` and specifying `"origin=p4_config"`
along with the path of the p4 config file to send.
Example:
Send the config.p4 file
```
gnmi [OPTIONS] replace 'origin=p4_config' 'config.p4'
```
## Paths
Paths in `gnmi` use a simplified xpath style. Path elements are
separated by `/`. Selectors may be used on list to select certain
members. Selectors are of the form `[key-leaf=value]`. All members of a
list may be selected by not specifying any selectors, or by using a
`*` as the value in a selector. The following are equivalent:
* `/interfaces/interface`
* `/interfaces/interface[name=*]`
All characters, including `/`, are allowed inside a selector value. The
character `]` must be escaped, for example `[key=[\]]` selects the
element in the list whose `key` leaf is value `[]`.
See more examples of paths in the examples above.
See
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths)
for more information.

View File

@ -1,185 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"context"
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/glog"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
// TODO: Make this more clear
var help = `Usage of gnmi:
gnmi -addr [<VRF-NAME>/]ADDRESS:PORT [options...]
capabilities
get (origin=ORIGIN) PATH+
subscribe (origin=ORIGIN) PATH+
((update|replace (origin=ORIGIN) PATH JSON|FILE)|(delete (origin=ORIGIN) PATH))+
`
func usageAndExit(s string) {
flag.Usage()
if s != "" {
fmt.Fprintln(os.Stderr, s)
}
os.Exit(1)
}
func main() {
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "", "Address of gNMI gRPC server with optional VRF name")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&cfg.Compression, "compression", "gzip", "Compression method. "+
`Supported options: "" and "gzip"`)
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
subscribeOptions := &gnmi.SubscribeOptions{}
flag.StringVar(&subscribeOptions.Prefix, "prefix", "", "Subscribe prefix path")
flag.BoolVar(&subscribeOptions.UpdatesOnly, "updates_only", false,
"Subscribe to updates only (false | true)")
flag.StringVar(&subscribeOptions.Mode, "mode", "stream",
"Subscribe mode (stream | once | poll)")
flag.StringVar(&subscribeOptions.StreamMode, "stream_mode", "target_defined",
"Subscribe stream mode, only applies for stream subscriptions "+
"(target_defined | on_change | sample)")
sampleIntervalStr := flag.String("sample_interval", "0", "Subscribe sample interval, "+
"only applies for sample subscriptions (400ms, 2.5s, 1m, etc.)")
heartbeatIntervalStr := flag.String("heartbeat_interval", "0", "Subscribe heartbeat "+
"interval, only applies for on-change subscriptions (400ms, 2.5s, 1m, etc.)")
flag.Usage = func() {
fmt.Fprintln(os.Stderr, help)
flag.PrintDefaults()
}
flag.Parse()
if cfg.Addr == "" {
usageAndExit("error: address not specified")
}
var sampleInterval, heartbeatInterval time.Duration
var err error
if sampleInterval, err = time.ParseDuration(*sampleIntervalStr); err != nil {
usageAndExit(fmt.Sprintf("error: sample interval (%s) invalid", *sampleIntervalStr))
}
subscribeOptions.SampleInterval = uint64(sampleInterval)
if heartbeatInterval, err = time.ParseDuration(*heartbeatIntervalStr); err != nil {
usageAndExit(fmt.Sprintf("error: heartbeat interval (%s) invalid", *heartbeatIntervalStr))
}
subscribeOptions.HeartbeatInterval = uint64(heartbeatInterval)
args := flag.Args()
ctx := gnmi.NewContext(context.Background(), cfg)
client, err := gnmi.Dial(cfg)
if err != nil {
glog.Fatal(err)
}
var setOps []*gnmi.Operation
for i := 0; i < len(args); i++ {
switch args[i] {
case "capabilities":
if len(setOps) != 0 {
usageAndExit("error: 'capabilities' not allowed after 'merge|replace|delete'")
}
err := gnmi.Capabilities(ctx, client)
if err != nil {
glog.Fatal(err)
}
return
case "get":
if len(setOps) != 0 {
usageAndExit("error: 'get' not allowed after 'merge|replace|delete'")
}
origin, ok := parseOrigin(args[i+1])
if ok {
i++
}
err := gnmi.Get(ctx, client, gnmi.SplitPaths(args[i+1:]), origin)
if err != nil {
glog.Fatal(err)
}
return
case "subscribe":
if len(setOps) != 0 {
usageAndExit("error: 'subscribe' not allowed after 'merge|replace|delete'")
}
origin, ok := parseOrigin(args[i+1])
if ok {
i++
}
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
defer close(errChan)
subscribeOptions.Origin = origin
subscribeOptions.Paths = gnmi.SplitPaths(args[i+1:])
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
for {
select {
case resp, open := <-respChan:
if !open {
return
}
if err := gnmi.LogSubscribeResponse(resp); err != nil {
glog.Fatal(err)
}
case err := <-errChan:
glog.Fatal(err)
}
}
case "update", "replace", "delete":
if len(args) == i+1 {
usageAndExit("error: missing path")
}
op := &gnmi.Operation{
Type: args[i],
}
i++
var ok bool
op.Origin, ok = parseOrigin(args[i])
if ok {
i++
}
op.Path = gnmi.SplitPath(args[i])
if op.Type != "delete" {
if len(args) == i+1 {
usageAndExit("error: missing JSON or FILEPATH to data")
}
i++
op.Val = args[i]
}
setOps = append(setOps, op)
default:
usageAndExit(fmt.Sprintf("error: unknown operation %q", args[i]))
}
}
if len(setOps) == 0 {
usageAndExit("")
}
err = gnmi.Set(ctx, client, setOps)
if err != nil {
glog.Fatal(err)
}
}
func parseOrigin(s string) (string, bool) {
if strings.HasPrefix(s, "origin=") {
return strings.TrimPrefix(s, "origin="), true
}
return "", false
}

View File

@ -1,245 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/vcs"
)
// Implementation taken from "isStandardImportPath" in go's source.
func isStdLibPath(path string) bool {
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
elem := path[:i]
return !strings.Contains(elem, ".")
}
// sortImports takes in an "import" body and returns it sorted
func sortImports(in []byte, sections []string) []byte {
type importLine struct {
index int // index into inLines
path string // import path used for sorting
}
// imports holds all the import lines, separated by section. The
// first section is for stdlib imports, the following sections
// hold the user specified sections, the final section is for
// everything else.
imports := make([][]importLine, len(sections)+2)
addImport := func(section, index int, importPath string) {
imports[section] = append(imports[section], importLine{index, importPath})
}
stdlib := 0
offset := 1
other := len(imports) - 1
inLines := bytes.Split(in, []byte{'\n'})
for i, line := range inLines {
if len(line) == 0 {
continue
}
start := bytes.IndexByte(line, '"')
if start == -1 {
continue
}
if comment := bytes.Index(line, []byte("//")); comment > -1 && comment < start {
continue
}
start++ // skip '"'
end := bytes.IndexByte(line[start:], '"') + start
s := string(line[start:end])
found := false
for j, sect := range sections {
if strings.HasPrefix(s, sect) && (len(sect) == len(s) || s[len(sect)] == '/') {
addImport(j+offset, i, s)
found = true
break
}
}
if found {
continue
}
if isStdLibPath(s) {
addImport(stdlib, i, s)
} else {
addImport(other, i, s)
}
}
out := make([]byte, 0, len(in)+2)
needSeparator := false
for _, section := range imports {
if len(section) == 0 {
continue
}
if needSeparator {
out = append(out, '\n')
}
sort.Slice(section, func(a, b int) bool {
return section[a].path < section[b].path
})
for _, s := range section {
out = append(out, inLines[s.index]...)
out = append(out, '\n')
}
needSeparator = true
}
return out
}
func genFile(in []byte, sections []string) ([]byte, error) {
out := make([]byte, 0, len(in)+3) // Add some fudge to avoid re-allocation
for {
const importLine = "\nimport (\n"
const importLineLen = len(importLine)
importStart := bytes.Index(in, []byte(importLine))
if importStart == -1 {
break
}
// Save to `out` everything up to and including "import (\n"
out = append(out, in[:importStart+importLineLen]...)
in = in[importStart+importLineLen:]
importLen := bytes.Index(in, []byte("\n)\n"))
if importLen == -1 {
return nil, errors.New(`parsing error: missing ")"`)
}
// Sort body of "import" and write it to `out`
out = append(out, sortImports(in[:importLen], sections)...)
out = append(out, []byte(")")...)
in = in[importLen+2:]
}
// Write everything leftover to out
out = append(out, in...)
return out, nil
}
// returns true if the file changed
func processFile(filename string, writeFile, listDiffFiles bool, sections []string) (bool, error) {
in, err := ioutil.ReadFile(filename)
if err != nil {
return false, err
}
out, err := genFile(in, sections)
if err != nil {
return false, err
}
equal := bytes.Equal(in, out)
if listDiffFiles {
return !equal, nil
}
if !writeFile {
os.Stdout.Write(out)
return !equal, nil
}
if equal {
return false, nil
}
temp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return false, err
}
defer os.RemoveAll(temp.Name())
s, err := os.Stat(filename)
if err != nil {
return false, err
}
if _, err = temp.Write(out); err != nil {
return false, err
}
if err := temp.Close(); err != nil {
return false, err
}
if err := os.Chmod(temp.Name(), s.Mode()); err != nil {
return false, err
}
if err := os.Rename(temp.Name(), filename); err != nil {
return false, err
}
return true, nil
}
// maps directory to vcsRoot
var vcsRootCache = make(map[string]string)
func vcsRootImportPath(f string) (string, error) {
path, err := filepath.Abs(f)
if err != nil {
return "", err
}
dir := filepath.Dir(path)
if root, ok := vcsRootCache[dir]; ok {
return root, nil
}
gopath := build.Default.GOPATH
var root string
_, root, err = vcs.FromDir(dir, filepath.Join(gopath, "src"))
if err != nil {
return "", err
}
vcsRootCache[dir] = root
return root, nil
}
func main() {
writeFile := flag.Bool("w", false, "write result to file instead of stdout")
listDiffFiles := flag.Bool("l", false, "list files whose formatting differs from importsort")
var sections multistring
flag.Var(&sections, "s", "package `prefix` to define an import section,"+
` ex: "cvshub.com/company". May be specified multiple times.`+
" If not specified, the repository root is used.")
flag.Parse()
checkVCSRoot := sections == nil
for _, f := range flag.Args() {
if checkVCSRoot {
root, err := vcsRootImportPath(f)
if err != nil {
fmt.Fprintf(os.Stderr, "error determining VCS root for file %q: %s\n", f, err)
continue
}
sections = multistring{root}
}
diff, err := processFile(f, *writeFile, *listDiffFiles, sections)
if err != nil {
fmt.Fprintf(os.Stderr, "error while proccessing file %q: %s", f, err)
continue
}
if *listDiffFiles && diff {
fmt.Println(f)
}
}
}
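// multistring implements flag.Value so that -s can be passed multiple times.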
type multistring []string
func (m *multistring) String() string {
return strings.Join(*m, ", ")
}
func (m *multistring) Set(s string) error {
*m = append(*m, s)
return nil
}

View File

@ -1,40 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"io/ioutil"
"testing"
)
const (
goldFile = "testdata/test.go.gold"
inFile = "testdata/test.go.in"
)
func TestImportSort(t *testing.T) {
in, err := ioutil.ReadFile(inFile)
if err != nil {
t.Fatal(err)
}
gold, err := ioutil.ReadFile(goldFile)
if err != nil {
t.Fatal(err)
}
sections := []string{"foobar", "cvshub.com/foobar"}
if out, err := genFile(gold, sections); err != nil {
t.Fatal(err)
} else if !bytes.Equal(out, gold) {
t.Errorf("importsort on %s file produced a change", goldFile)
t.Log(string(out))
}
if out, err := genFile(in, sections); err != nil {
t.Fatal(err)
} else if !bytes.Equal(out, gold) {
t.Errorf("importsort on %s different than gold", inFile)
t.Log(string(out))
}
}

View File

@ -1,52 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
)
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
"cvshub.com/other/import"
)
func foobar() {}
import (
z "bytes"
"strings"
"foobar"
_ "foobar/baz" // in line comment
. "foobar/qux" // in line comment
"cvshub.com/foobar/import"
)
import (
"bytes"
"cvshub.com/foobar/import"
)
import (
"cvshub.com/foobar/import"
)
func main() {
foobar()
}

View File

@ -1,47 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
)
import (
"bytes"
"strings"
"foobar"
"foobar/baz"
"cvshub.com/foobar/import"
"cvshub.com/other/import"
)
func foobar() {}
import (
// Comment going away
"cvshub.com/foobar/import"
"strings"
_ "foobar/baz" // in line comment
"foobar"
z "bytes"
. "foobar/qux" // in line comment
)
import (
"cvshub.com/foobar/import"
"bytes"
)
import (
"cvshub.com/foobar/import"
)
func main() {
foobar()
}

View File

@ -1,142 +0,0 @@
// Copyright (c) 2018 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// json2test reformats 'go test -json' output as text as if the -json
// flag were not passed to go test. It is useful if you want to
// analyze go test -json output, but still want a human readable test
// log.
//
// Usage:
//
// go test -json > out.txt; <analysis program> out.txt; cat out.txt | json2test
//
package main
import (
"bufio"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"log"
"os"
"time"
)
var errTestFailure = errors.New("testfailure")
func main() {
verbose := flag.Bool("v", false, "Verbose output. "+
"By default only failed tests emit verbose output in test result summary.")
flag.Parse()
err := writeTestOutput(os.Stdin, os.Stdout, *verbose)
if err == errTestFailure {
os.Exit(1)
} else if err != nil {
log.Fatal(err)
}
}
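// testEvent mirrors the JSON event format emitted by 'go test -json'
// (see 'go doc cmd/test2json').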
type testEvent struct {
Time time.Time // encodes as an RFC3339-format string
Action string
Package string
Test string
Elapsed float64 // seconds
Output string
}
type test struct {
pkg string
test string
}
type outputBuffer struct {
output []string
}
func (o *outputBuffer) push(s string) {
o.output = append(o.output, s)
}
type testFailure struct {
t test
o outputBuffer
}
func writeTestOutput(in io.Reader, out io.Writer, verbose bool) error {
testOutputBuffer := map[test]*outputBuffer{}
var failures []testFailure
d := json.NewDecoder(in)
buf := bufio.NewWriter(out)
defer buf.Flush()
for {
var e testEvent
if err := d.Decode(&e); err != nil {
break
}
switch e.Action {
default:
continue
case "run":
testOutputBuffer[test{pkg: e.Package, test: e.Test}] = new(outputBuffer)
case "pass":
if !verbose && e.Test == "" {
// Match go test output:
// ok foo/bar 2.109s
fmt.Fprintf(buf, "ok \t%s\t%.3fs\n", e.Package, e.Elapsed)
}
// Don't hold onto text for passing
delete(testOutputBuffer, test{pkg: e.Package, test: e.Test})
case "fail":
if !verbose {
if e.Test != "" {
// Match go test output:
// --- FAIL: TestFooBar (0.00s)
fmt.Fprintf(buf, "--- FAIL: %s (%.3f)\n", e.Test, e.Elapsed)
} else {
// Match go test output:
// FAIL foo/bar 1.444s
fmt.Fprintf(buf, "FAIL\t%s\t%.3fs\n", e.Package, e.Elapsed)
}
}
// fail may be for a package, which won't have an entry in
// testOutputBuffer because packages don't have a "run"
// action.
t := test{pkg: e.Package, test: e.Test}
if o, ok := testOutputBuffer[t]; ok {
f := testFailure{t: t, o: *o}
delete(testOutputBuffer, t)
failures = append(failures, f)
}
case "output":
if verbose {
buf.WriteString(e.Output)
}
// output may be for a package, which won't have an entry
// in testOutputBuffer because packages don't have a "run"
// action.
if o, ok := testOutputBuffer[test{pkg: e.Package, test: e.Test}]; ok {
o.push(e.Output)
}
}
}
if len(failures) == 0 {
return nil
}
buf.WriteString("\nTest failures:\n")
for i, f := range failures {
fmt.Fprintf(buf, "[%d] %s.%s\n", i+1, f.t.pkg, f.t.test)
for _, s := range f.o.output {
buf.WriteString(s)
}
if i < len(failures)-1 {
buf.WriteByte('\n')
}
}
return errTestFailure
}

View File

@ -1,58 +0,0 @@
// Copyright (c) 2018 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"io/ioutil"
"os"
"testing"
"github.com/kylelemons/godebug/diff"
)
func TestWriteTestOutput(t *testing.T) {
for name, tc := range map[string]struct {
verbose bool
inputFile string
goldFile string
}{
"quiet": {
verbose: false,
inputFile: "testdata/input.txt",
goldFile: "testdata/gold-quiet.txt",
},
"verbose": {
verbose: true,
inputFile: "testdata/input.txt",
goldFile: "testdata/gold-verbose.txt",
},
} {
t.Run(name, func(t *testing.T) {
input, err := os.Open(tc.inputFile)
if err != nil {
t.Fatal(err)
}
var out bytes.Buffer
if err := writeTestOutput(input, &out, tc.verbose); err != errTestFailure {
t.Error("expected test failure")
}
gold, err := os.Open(tc.goldFile)
if err != nil {
t.Fatal(err)
}
expected, err := ioutil.ReadAll(gold)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(out.Bytes(), expected) {
t.Errorf("output does not match %s", tc.goldFile)
t.Error("\n" + diff.Diff(string(expected), out.String()))
}
})
}
}

View File

@ -1,12 +0,0 @@
ok pkg/passed 0.013s
--- FAIL: TestPanic (600.029s)
--- FAIL: TestFail (0.180s)
FAIL pkg/failed 0.204s
Test failures:
[1] pkg/panic.TestPanic
panic
FAIL pkg/panic 600.029s
[2] pkg/failed.TestFail
--- FAIL: TestFail (0.18s)

View File

@ -1,16 +0,0 @@
? pkg/skipped [no test files]
=== RUN TestPass
--- PASS: TestPass (0.00s)
PASS
ok pkg/passed 0.013s
panic
FAIL pkg/panic 600.029s
--- FAIL: TestFail (0.18s)
Test failures:
[1] pkg/panic.TestPanic
panic
FAIL pkg/panic 600.029s
[2] pkg/failed.TestFail
--- FAIL: TestFail (0.18s)

View File

@ -1,17 +0,0 @@
{"Time":"2018-03-08T10:33:12.002692769-08:00","Action":"output","Package":"pkg/skipped","Output":"? \tpkg/skipped\t[no test files]\n"}
{"Time":"2018-03-08T10:33:12.003199228-08:00","Action":"skip","Package":"pkg/skipped","Elapsed":0.001}
{"Time":"2018-03-08T10:33:12.343866281-08:00","Action":"run","Package":"pkg/passed","Test":"TestPass"}
{"Time":"2018-03-08T10:33:12.34406622-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"=== RUN TestPass\n"}
{"Time":"2018-03-08T10:33:12.344139342-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"--- PASS: TestPass (0.00s)\n"}
{"Time":"2018-03-08T10:33:12.344165231-08:00","Action":"pass","Package":"pkg/passed","Test":"TestPass","Elapsed":0}
{"Time":"2018-03-08T10:33:12.344297059-08:00","Action":"output","Package":"pkg/passed","Output":"PASS\n"}
{"Time":"2018-03-08T10:33:12.345217622-08:00","Action":"output","Package":"pkg/passed","Output":"ok \tpkg/passed\t0.013s\n"}
{"Time":"2018-03-08T10:33:12.34533033-08:00","Action":"pass","Package":"pkg/passed","Elapsed":0.013}
{"Time":"2018-03-08T10:33:20.243866281-08:00","Action":"run","Package":"pkg/panic","Test":"TestPanic"}
{"Time":"2018-03-08T10:33:20.27231537-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"panic\n"}
{"Time":"2018-03-08T10:33:20.272414481-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"FAIL\tpkg/panic\t600.029s\n"}
{"Time":"2018-03-08T10:33:20.272440286-08:00","Action":"fail","Package":"pkg/panic","Test":"TestPanic","Elapsed":600.029}
{"Time":"2018-03-08T10:33:26.143866281-08:00","Action":"run","Package":"pkg/failed","Test":"TestFail"}
{"Time":"2018-03-08T10:33:27.158776469-08:00","Action":"output","Package":"pkg/failed","Test":"TestFail","Output":"--- FAIL: TestFail (0.18s)\n"}
{"Time":"2018-03-08T10:33:27.158860934-08:00","Action":"fail","Package":"pkg/failed","Test":"TestFail","Elapsed":0.18}
{"Time":"2018-03-08T10:33:27.161302093-08:00","Action":"fail","Package":"pkg/failed","Elapsed":0.204}

View File

@ -1,3 +0,0 @@
# occli
# DEPRECATED
Please use [gnmi](../gnmi) instead.

View File

@ -1,29 +0,0 @@
# ockafka
Client for the gRPC OpenConfig service for subscribing to the configuration and
state of a network device and feeding the stream to Kafka.
## Sample usage
Subscribe to all updates on the Arista device at `10.0.1.2` and stream to a local
Kafka instance:
```
ockafka -addrs 10.0.1.2
```
Subscribe to temperature sensors from 2 switches and stream to a remote Kafka instance:
```
ockafka -addrs 10.0.1.2,10.0.1.3 -kafkaaddrs kafka:9092 -subscribe /Sysdb/environment/temperature/status/tempSensor
```
Start in a container:
```
docker run aristanetworks/ockafka -addrs 10.0.1.1 -kafkaaddrs kafka:9092
```
## Kafka/Elastic integration demo
The following video demoes integration with Kafka and Elastic using [this Logstash instance](https://github.com/aristanetworks/docker-logstash):
[![video preview](http://img.youtube.com/vi/WsyFmxMwXYQ/0.jpg)](https://youtu.be/WsyFmxMwXYQ)

View File

@ -1,75 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ockafka tool is a client for the OpenConfig gRPC service that
// subscribes to the state of a network device and streams the updates to Kafka.
package main
import (
"flag"
"fmt"
"strings"
"sync"
"github.com/aristanetworks/goarista/kafka"
"github.com/aristanetworks/goarista/kafka/openconfig"
"github.com/aristanetworks/goarista/kafka/producer"
"github.com/aristanetworks/goarista/openconfig/client"
"github.com/Shopify/sarama"
"github.com/aristanetworks/glog"
"github.com/golang/protobuf/proto"
)
var keysFlag = flag.String("kafkakeys", "",
"Keys for kafka messages (comma-separated, default: the value of -addrs). The key '"+
client.HostnameArg+"' is replaced by the current hostname.")
func newProducer(addresses []string, topic, key, dataset string) (producer.Producer, error) {
encodedKey := sarama.StringEncoder(key)
p, err := producer.New(openconfig.NewEncoder(topic, encodedKey, dataset), addresses, nil)
if err != nil {
return nil, fmt.Errorf("Failed to create Kafka producer: %s", err)
}
glog.Infof("Connected to Kafka brokers at %s", addresses)
return p, nil
}
func main() {
username, password, subscriptions, grpcAddrs, opts := client.ParseFlags()
var keys []string
var err error
if *keysFlag == "" {
keys = grpcAddrs
} else {
keys, err = client.ParseHostnames(*keysFlag)
if err != nil {
glog.Fatal(err)
}
}
if len(grpcAddrs) != len(keys) {
glog.Fatal("Please provide the same number of addresses and Kafka keys")
}
addresses := strings.Split(*kafka.Addresses, ",")
wg := new(sync.WaitGroup)
for i, grpcAddr := range grpcAddrs {
key := keys[i]
p, err := newProducer(addresses, *kafka.Topic, key, grpcAddr)
if err != nil {
glog.Fatal(err)
} else {
glog.Infof("Initialized Kafka producer for %s", grpcAddr)
}
publish := func(addr string, message proto.Message) {
p.Write(message)
}
wg.Add(1)
p.Start()
defer p.Stop()
c := client.New(username, password, grpcAddr, opts)
go c.Subscribe(wg, subscriptions, publish)
}
wg.Wait()
}

View File

@ -1,37 +0,0 @@
# ocprometheus
This is a client for the OpenConfig gRPC interface that pushes telemetry to
Prometheus. Numerical and boolean values are supported (booleans are converted
to 1 for true and 0 for false). Non-numerical data isn't supported by Prometheus
and is silently dropped. Arrays (even with numeric values) are not yet supported.
This tool requires a config file to specify how to map the paths of the
notifications coming out of the OpenConfig gRPC interface onto Prometheus
metric names, and how to extract labels from the paths. For example, the
following rule, excerpted from `sampleconfig.yml`:
```yaml
metrics:
- name: tempSensor
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
help: Temperature and Maximum Temperature
# ...
```
Applied to an update for the path
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value`,
this rule yields the metric name `tempSensor` with labels `sensor=TempSensor1` and `type=temperature`.
Named capture groups are used to extract (optional) labels.
Unnamed groups are given label names like "unnamedLabelX" (where X is the group's position).
The timestamps from the notifications are not preserved, since Prometheus uses a pull model
and doesn't (yet) support exporter-specified timestamps; Prometheus 2.0 will probably add that support.
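To make the label extraction concrete, here is a minimal, self-contained sketch (illustrative only, not part of ocprometheus; it reuses the tempSensor rule and example path from above):
```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The tempSensor rule from sampleconfig.yml.
	re := regexp.MustCompile(`/Sysdb/environment/temperature/status/tempSensor/` +
		`(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value`)
	path := "/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value"
	groups := re.FindStringSubmatch(path)
	for i, name := range re.SubexpNames()[1:] {
		if name == "" { // unnamed groups get positional label names
			name = fmt.Sprintf("unnamedLabel%d", i+1)
		}
		fmt.Printf("%s=%s\n", name, groups[i+1]) // sensor=TempSensor1, type=temperature
	}
}
```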
## Usage
See the `-help` output, but here's an example that pushes all the metrics defined
in the sample config file:
```
ocprometheus -addr <switch-hostname>:6042 -config sampleconfig.yml
```

View File

@ -1,234 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"encoding/json"
"math"
"strings"
"sync"
"github.com/aristanetworks/glog"
"github.com/aristanetworks/goarista/gnmi"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
pb "github.com/openconfig/gnmi/proto/gnmi"
"github.com/prometheus/client_golang/prometheus"
)
// A metric source.
type source struct {
addr string
path string
}
// Since the labels are fixed per-path and per-device we can cache them here,
// to avoid recomputing them.
type labelledMetric struct {
metric prometheus.Metric
labels []string
defaultValue float64
stringMetric bool
}
type collector struct {
// Protects access to metrics map
m sync.Mutex
metrics map[source]*labelledMetric
config *Config
}
func newCollector(config *Config) *collector {
return &collector{
metrics: make(map[source]*labelledMetric),
config: config,
}
}
// Process a notification and update or create the corresponding metrics.
func (c *collector) update(addr string, message proto.Message) {
resp, ok := message.(*pb.SubscribeResponse)
if !ok {
glog.Errorf("Unexpected type of message: %T", message)
return
}
notif := resp.GetUpdate()
if notif == nil {
return
}
device := strings.Split(addr, ":")[0]
prefix := gnmi.StrPath(notif.Prefix)
// Process deletes first
for _, del := range notif.Delete {
path := prefix + gnmi.StrPath(del)
key := source{addr: device, path: path}
c.m.Lock()
if _, ok := c.metrics[key]; ok {
delete(c.metrics, key)
} else {
// TODO: replace this with a prefix tree
p := path + "/"
for k := range c.metrics {
if k.addr == device && strings.HasPrefix(k.path, p) {
delete(c.metrics, k)
}
}
}
c.m.Unlock()
}
// Process updates next
for _, update := range notif.Update {
path := prefix + gnmi.StrPath(update.Path)
value, suffix, ok := parseValue(update)
if !ok {
continue
}
var strUpdate bool
var floatVal float64
var strVal string
switch v := value.(type) {
case float64:
strUpdate = false
floatVal = v
case string:
strUpdate = true
strVal = v
}
if suffix != "" {
path += "/" + suffix
}
src := source{addr: device, path: path}
c.m.Lock()
// Use the cached labels and descriptor if available
if m, ok := c.metrics[src]; ok {
if strUpdate {
// Skip string updates for non string metrics
if !m.stringMetric {
c.m.Unlock()
continue
}
// Display a default value and replace the value label with the string value
floatVal = m.defaultValue
m.labels[len(m.labels)-1] = strVal
}
m.metric = prometheus.MustNewConstMetric(m.metric.Desc(), prometheus.GaugeValue,
floatVal, m.labels...)
c.m.Unlock()
continue
}
c.m.Unlock()
// Get the descriptor and labels for this source
metric := c.config.getMetricValues(src)
if metric == nil || metric.desc == nil {
glog.V(8).Infof("Ignoring unmatched update %v at %s:%s with value %+v",
update, device, path, value)
continue
}
if strUpdate {
if !metric.stringMetric {
// Skip string updates for non string metrics
continue
}
// Display a default value and replace the value label with the string value
floatVal = metric.defaultValue
metric.labels[len(metric.labels)-1] = strVal
}
// Save the metric and labels in the cache
c.m.Lock()
lm := prometheus.MustNewConstMetric(metric.desc, prometheus.GaugeValue,
floatVal, metric.labels...)
c.metrics[src] = &labelledMetric{
metric: lm,
labels: metric.labels,
defaultValue: metric.defaultValue,
stringMetric: metric.stringMetric,
}
c.m.Unlock()
}
}
// parseValue takes in an update and parses a value and suffix
// Returns an interface that contains either a string or a float64 as well as a suffix
// Unparseable updates return (0, empty string, false)
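// For example, a TypedValue carrying JSON {"value": 45} yields (45.0, "value", true),
// and JSON true yields (1.0, "", true).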
func parseValue(update *pb.Update) (interface{}, string, bool) {
intf, err := gnmi.ExtractValue(update)
if err != nil {
return 0, "", false
}
switch value := intf.(type) {
// float64 or string expected as the return value
case int64:
return float64(value), "", true
case uint64:
return float64(value), "", true
case float32:
return float64(value), "", true
case *pb.Decimal64:
val := gnmi.DecimalToFloat(value)
if math.IsInf(val, 0) || math.IsNaN(val) {
return 0, "", false
}
return val, "", true
case json.Number:
valFloat, err := value.Float64()
if err != nil {
return value, "", true
}
return valFloat, "", true
case *any.Any:
return value.String(), "", true
case []interface{}:
// extract string representation for now
return gnmi.StrVal(update.Val), "", false
case map[string]interface{}:
if vIntf, ok := value["value"]; ok {
if num, ok := vIntf.(json.Number); ok {
valFloat, err := num.Float64()
if err != nil {
return num, "value", true
}
return valFloat, "value", true
}
}
case bool:
if value {
return float64(1), "", true
}
return float64(0), "", true
case string:
return value, "", true
default:
glog.V(9).Infof("Ignoring update with unexpected type: %T", value)
}
return 0, "", false
}
// Describe implements prometheus.Collector interface
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
c.config.getAllDescs(ch)
}
// Collect implements prometheus.Collector interface
func (c *collector) Collect(ch chan<- prometheus.Metric) {
c.m.Lock()
for _, m := range c.metrics {
ch <- m.metric
}
c.m.Unlock()
}

View File

@ -1,369 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"fmt"
"strings"
"testing"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/goarista/test"
pb "github.com/openconfig/gnmi/proto/gnmi"
"github.com/prometheus/client_golang/prometheus"
)
func makeMetrics(cfg *Config, expValues map[source]float64, notification *pb.Notification,
prevMetrics map[source]*labelledMetric) map[source]*labelledMetric {
expMetrics := map[source]*labelledMetric{}
if prevMetrics != nil {
expMetrics = prevMetrics
}
for src, v := range expValues {
metric := cfg.getMetricValues(src)
if metric == nil || metric.desc == nil || metric.labels == nil {
panic("cfg.getMetricValues returned nil")
}
// Preserve current value of labels
labels := metric.labels
if _, ok := expMetrics[src]; ok && expMetrics[src] != nil {
labels = expMetrics[src].labels
}
// Handle string updates
if notification.Update != nil {
if update, err := findUpdate(notification, src.path); err == nil {
val, _, ok := parseValue(update)
if !ok {
continue
}
if strVal, ok := val.(string); ok {
if !metric.stringMetric {
continue
}
v = metric.defaultValue
labels[len(labels)-1] = strVal
}
}
}
expMetrics[src] = &labelledMetric{
metric: prometheus.MustNewConstMetric(metric.desc, prometheus.GaugeValue, v,
labels...),
labels: labels,
defaultValue: metric.defaultValue,
stringMetric: metric.stringMetric,
}
}
// Handle deletion
for key := range expMetrics {
if _, ok := expValues[key]; !ok {
delete(expMetrics, key)
}
}
return expMetrics
}
func findUpdate(notif *pb.Notification, path string) (*pb.Update, error) {
prefix := notif.Prefix
for _, v := range notif.Update {
fullPath := gnmi.StrPath(gnmi.JoinPaths(prefix, v.Path))
if strings.Contains(path, fullPath) || path == fullPath {
return v, nil
}
}
return nil, fmt.Errorf("Failed to find matching update for path %v", path)
}
func makeResponse(notif *pb.Notification) *pb.SubscribeResponse {
return &pb.SubscribeResponse{
Response: &pb.SubscribeResponse_Update{Update: notif},
}
}
func makePath(pathStr string) *pb.Path {
splitPath := gnmi.SplitPath(pathStr)
path, err := gnmi.ParseGNMIElements(splitPath)
if err != nil {
return &pb.Path{}
}
return path
}
func TestUpdate(t *testing.T) {
config := []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
- /Sysdb/bridging/igmpsnooping/forwarding/forwarding/status
metrics:
- name: fanName
path: /Sysdb/environment/cooling/status/fan/name
help: Fan Name
valuelabel: name
defaultvalue: 2.5
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed
- name: igmpSnoopingInf
path: /Sysdb/igmpsnooping/vlanStatus/(?P<vlan>.+)/ethGroup/(?P<mac>.+)/intf/(?P<intf>.+)
help: IGMP snooping status`)
cfg, err := parseConfig(config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
coll := newCollector(cfg)
notif := &pb.Notification{
Prefix: makePath("Sysdb"),
Update: []*pb.Update{
{
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("42")},
},
},
{
Path: makePath("environment/cooling/status/fan/speed"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("{\"value\": 45}")},
},
},
{
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
},
},
{
Path: makePath("environment/cooling/status/fan/name"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"Fan1.1\"")},
},
},
},
}
expValues := map[source]float64{
{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
}: 42,
{
addr: "10.1.1.1",
path: "/Sysdb/environment/cooling/status/fan/speed/value",
}: 45,
{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu",
}: 1,
{
addr: "10.1.1.1",
path: "/Sysdb/environment/cooling/status/fan/name",
}: 2.5,
}
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics := makeMetrics(cfg, expValues, notif, nil)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Update two values, and one path which is not a metric
notif = &pb.Notification{
Prefix: makePath("Sysdb"),
Update: []*pb.Update{
{
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("52")},
},
},
{
Path: makePath("environment/cooling/status/fan/name"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"Fan2.1\"")},
},
},
{
Path: makePath("environment/doesntexist/status"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("{\"value\": 45}")},
},
},
},
}
src := source{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
}
expValues[src] = 52
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Same path, different device
notif = &pb.Notification{
Prefix: makePath("Sysdb"),
Update: []*pb.Update{
{
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("42")},
},
},
},
}
src.addr = "10.1.1.2"
expValues[src] = 42
coll.update("10.1.1.2:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Delete a path
notif = &pb.Notification{
Prefix: makePath("Sysdb"),
Delete: []*pb.Path{makePath("lag/intfCounterDir/Ethernet1/intfCounter")},
}
src.addr = "10.1.1.1"
delete(expValues, src)
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Non-numeric update to path without value label
notif = &pb.Notification{
Prefix: makePath("Sysdb"),
Update: []*pb.Update{
{
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"test\"")},
},
},
},
}
coll.update("10.1.1.1:6042", makeResponse(notif))
// Don't make new metrics as it should have no effect
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
}
func TestCoalescedDelete(t *testing.T) {
config := []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
- /Sysdb/bridging/igmpsnooping/forwarding/forwarding/status
metrics:
- name: fanName
path: /Sysdb/environment/cooling/status/fan/name
help: Fan Name
valuelabel: name
defaultvalue: 2.5
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed
- name: igmpSnoopingInf
path: /Sysdb/igmpsnooping/vlanStatus/(?P<vlan>.+)/ethGroup/(?P<mac>.+)/intf/(?P<intf>.+)
help: IGMP snooping status`)
cfg, err := parseConfig(config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
coll := newCollector(cfg)
notif := &pb.Notification{
Prefix: makePath("Sysdb"),
Update: []*pb.Update{
{
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
},
},
{
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
},
},
{
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:03/intf/Cpu"),
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
},
},
},
}
expValues := map[source]float64{
{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu",
}: 1,
{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu",
}: 1,
{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:03/intf/Cpu",
}: 1,
}
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics := makeMetrics(cfg, expValues, notif, nil)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
// Delete a subtree
notif = &pb.Notification{
Prefix: makePath("Sysdb"),
Delete: []*pb.Path{makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02")},
}
src := source{
addr: "10.1.1.1",
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu",
}
delete(expValues, src)
coll.update("10.1.1.1:6042", makeResponse(notif))
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
if !test.DeepEqual(expMetrics, coll.metrics) {
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
}
}

View File

@ -1,142 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"fmt"
"regexp"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
)
// Config is the representation of ocprometheus's YAML config file.
type Config struct {
// Per-device labels.
DeviceLabels map[string]prometheus.Labels
// Prefixes to subscribe to.
Subscriptions []string
// Metrics to collect and how to munge them.
Metrics []*MetricDef
}
// MetricDef is the representation of a metric definition in the config file.
type MetricDef struct {
// Path is a regexp to match on the Update's full path.
// The regexp must be a prefix match.
// The regexp can define named capture groups to use as labels.
Path string
// Path compiled as a regexp.
re *regexp.Regexp `deepequal:"ignore"`
// Metric name.
Name string
// Metric help string.
Help string
// Label to store string values
ValueLabel string
// Default value to display for string values
DefaultValue float64
// Does the metric store a string value
stringMetric bool
// This map contains the metric descriptors for this metric for each device.
devDesc map[string]*prometheus.Desc
// This is the default metric descriptor for devices that don't have explicit descs.
desc *prometheus.Desc
}
// metricValues contains the values used in updating a metric
type metricValues struct {
desc *prometheus.Desc
labels []string
defaultValue float64
stringMetric bool
}
// Parses the config and creates the descriptors for each path and device.
func parseConfig(cfg []byte) (*Config, error) {
config := &Config{
DeviceLabels: make(map[string]prometheus.Labels),
}
if err := yaml.Unmarshal(cfg, config); err != nil {
return nil, fmt.Errorf("Failed to parse config: %v", err)
}
for _, def := range config.Metrics {
def.re = regexp.MustCompile(def.Path)
// Extract label names
reNames := def.re.SubexpNames()[1:]
labelNames := make([]string, len(reNames))
for i, n := range reNames {
labelNames[i] = n
if n == "" {
labelNames[i] = "unnamedLabel" + strconv.Itoa(i+1)
}
}
if def.ValueLabel != "" {
labelNames = append(labelNames, def.ValueLabel)
def.stringMetric = true
}
// Create a default descriptor only if there aren't any per-device labels,
// or if it's explicitly declared
if len(config.DeviceLabels) == 0 || len(config.DeviceLabels["*"]) > 0 {
def.desc = prometheus.NewDesc(def.Name, def.Help, labelNames, config.DeviceLabels["*"])
}
// Add per-device descriptors
def.devDesc = make(map[string]*prometheus.Desc)
for device, labels := range config.DeviceLabels {
if device == "*" {
continue
}
def.devDesc[device] = prometheus.NewDesc(def.Name, def.Help, labelNames, labels)
}
}
return config, nil
}
// Returns a struct containing the descriptor corresponding to the device and path, labels
// extracted from the path, the default value for the metric, and whether it accepts string values.
// If the device and path don't match any metric, returns nil.
func (c *Config) getMetricValues(s source) *metricValues {
for _, def := range c.Metrics {
if groups := def.re.FindStringSubmatch(s.path); groups != nil {
if def.ValueLabel != "" {
groups = append(groups, def.ValueLabel)
}
desc, ok := def.devDesc[s.addr]
if !ok {
desc = def.desc
}
return &metricValues{desc: desc, labels: groups[1:], defaultValue: def.DefaultValue,
stringMetric: def.stringMetric}
}
}
return nil
}
// Sends all the descriptors to the channel.
func (c *Config) getAllDescs(ch chan<- *prometheus.Desc) {
for _, def := range c.Metrics {
// Default descriptor might not be present
if def.desc != nil {
ch <- def.desc
}
for _, desc := range def.devDesc {
ch <- desc
}
}
}

View File

@ -1,449 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"regexp"
"testing"
"github.com/aristanetworks/goarista/test"
"github.com/prometheus/client_golang/prometheus"
)
func TestParseConfig(t *testing.T) {
tCases := []struct {
input []byte
config Config
}{
{
input: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: fanName
path: /Sysdb/environment/cooling/status/fan/name
help: Fan Name
valuelabel: name
defaultvalue: 25
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"10.1.1.1": {
"lab1": "val1",
"lab2": "val2",
},
"*": {
"lab1": "val3",
"lab2": "val4",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/environment/cooling/status/fan/name",
re: regexp.MustCompile(
"/Sysdb/environment/cooling/status/fan/name"),
Name: "fanName",
Help: "Fan Name",
ValueLabel: "name",
DefaultValue: 25,
stringMetric: true,
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("fanName",
"Fan Name",
[]string{"name"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
desc: prometheus.NewDesc("fanName",
"Fan Name",
[]string{"name"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
{
Path: "/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
},
},
},
{
input: []byte(`
devicelabels:
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"*": {
"lab1": "val3",
"lab2": "val4",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile(
"/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
},
},
},
},
{
input: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{
"10.1.1.1": {
"lab1": "val1",
"lab2": "val2",
},
},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile(
"/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
},
},
},
{
input: []byte(`
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/fan/speed/value
help: Fan Speed`),
config: Config{
DeviceLabels: map[string]prometheus.Labels{},
Subscriptions: []string{
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
},
Metrics: []*MetricDef{
{
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
re: regexp.MustCompile(
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
Name: "intfCounter",
Help: "Per-Interface Bytes/Errors/Discards Counters",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("intfCounter",
"Per-Interface Bytes/Errors/Discards Counters",
[]string{"intf"}, prometheus.Labels{}),
},
{
Path: "/Sysdb/environment/cooling/fan/speed/value",
re: regexp.MustCompile(
"/Sysdb/environment/cooling/fan/speed/value"),
Name: "fanSpeed",
Help: "Fan Speed",
devDesc: map[string]*prometheus.Desc{},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{}),
},
},
},
},
}
for i, c := range tCases {
cfg, err := parseConfig(c.input)
if err != nil {
t.Errorf("Unexpected error in case %d: %v", i+1, err)
continue
}
if !test.DeepEqual(*cfg, c.config) {
t.Errorf("Test case %d: mismatch %v", i+1, test.Diff(*cfg, c.config))
}
}
}
func TestGetMetricValues(t *testing.T) {
config := []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`)
cfg, err := parseConfig(config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
tCases := []struct {
src source
desc *prometheus.Desc
labels []string
}{
{
src: source{
addr: "10.1.1.1",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
},
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
labels: []string{"lag", "Ethernet1"},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
},
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
labels: []string{"lag", "Ethernet1"},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/environment/cooling/status/fan/speed/value",
},
desc: prometheus.NewDesc("fanSpeed", "Fan Speed",
[]string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
labels: []string{},
},
{
src: source{
addr: "10.2.2.2",
path: "/Sysdb/environment/nonexistent",
},
desc: nil,
labels: nil,
},
}
for i, c := range tCases {
metric := cfg.getMetricValues(c.src)
if metric == nil {
// Avoids error from trying to access metric.desc when metric is nil
metric = &metricValues{}
}
if !test.DeepEqual(metric.desc, c.desc) {
t.Errorf("Test case %d: desc mismatch %v", i+1, test.Diff(metric.desc, c.desc))
}
if !test.DeepEqual(metric.labels, c.labels) {
t.Errorf("Test case %d: labels mismatch %v", i+1, test.Diff(metric.labels, c.labels))
}
}
}
func TestGetAllDescs(t *testing.T) {
tCases := []struct {
config []byte
descs []*prometheus.Desc
}{
{
config: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`),
descs: []*prometheus.Desc{
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
{
config: []byte(`
devicelabels:
10.1.1.1:
lab1: val1
lab2: val2
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
metrics:
- name: intfCounter
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
help: Per-Interface Bytes/Errors/Discards Counters
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/speed/value
help: Fan Speed`),
descs: []*prometheus.Desc{
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
[]string{"unnamedLabel1", "intf"},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
},
},
}
for i, c := range tCases {
cfg, err := parseConfig(c.config)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
ch := make(chan *prometheus.Desc, 10)
cfg.getAllDescs(ch)
j := 0
for d := range ch {
if !test.DeepEqual(c.descs[j], d) {
t.Errorf("Test case %d: desc %d mismatch %v", i+1, j+1, test.Diff(c.descs[j], d))
}
j++
if j == len(c.descs) {
break
}
}
select {
case <-ch:
t.Errorf("Test case %d: too many descs", i+1)
default:
}
}
}

View File

@ -1,93 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ocprometheus tool implements a Prometheus exporter for OpenConfig telemetry data.
package main
import (
"context"
"flag"
"io/ioutil"
"net/http"
"strings"
"github.com/aristanetworks/glog"
"github.com/aristanetworks/goarista/gnmi"
pb "github.com/openconfig/gnmi/proto/gnmi"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func main() {
// gNMI options
gNMIcfg := &gnmi.Config{}
flag.StringVar(&gNMIcfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
flag.StringVar(&gNMIcfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&gNMIcfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&gNMIcfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&gNMIcfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&gNMIcfg.Password, "password", "", "Password to authenticate with")
flag.BoolVar(&gNMIcfg.TLS, "tls", false, "Enable TLS")
subscribePaths := flag.String("subscribe", "/", "Comma-separated list of paths to subscribe to")
// program options
listenaddr := flag.String("listenaddr", ":8080", "Address on which to expose the metrics")
url := flag.String("url", "/metrics", "URL where to expose the metrics")
configFlag := flag.String("config", "",
"Config to turn OpenConfig telemetry into Prometheus metrics")
flag.Parse()
subscriptions := strings.Split(*subscribePaths, ",")
if *configFlag == "" {
glog.Fatal("You need specify a config file using -config flag")
}
cfg, err := ioutil.ReadFile(*configFlag)
if err != nil {
glog.Fatalf("Can't read config file %q: %v", *configFlag, err)
}
config, err := parseConfig(cfg)
if err != nil {
glog.Fatal(err)
}
// Ignore the default "subscribe-to-everything" subscription of the
// -subscribe flag.
if subscriptions[0] == "/" {
subscriptions = subscriptions[1:]
}
// Add the subscriptions from the config file.
subscriptions = append(subscriptions, config.Subscriptions...)
coll := newCollector(config)
prometheus.MustRegister(coll)
ctx := gnmi.NewContext(context.Background(), gNMIcfg)
client, err := gnmi.Dial(gNMIcfg)
if err != nil {
glog.Fatal(err)
}
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
subscribeOptions := &gnmi.SubscribeOptions{
Mode: "stream",
StreamMode: "target_defined",
Paths: gnmi.SplitPaths(subscriptions),
}
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
go handleSubscription(respChan, errChan, coll, gNMIcfg.Addr)
http.Handle(*url, promhttp.Handler())
glog.Fatal(http.ListenAndServe(*listenaddr, nil))
}
func handleSubscription(respChan chan *pb.SubscribeResponse,
errChan chan error, coll *collector, addr string) {
for {
select {
case resp := <-respChan:
coll.update(addr, resp)
case err := <-errChan:
glog.Fatal(err)
}
}
}

View File

@ -1,80 +0,0 @@
# Per-device labels. Optional
# Exactly the same set of labels must be specified for each device.
# If device address is *, the labels apply to all devices not listed explicitly.
# If any explicit device is listed below, then you need to specify all devices you're subscribed to,
# or have a wildcard entry. Otherwise, updates from non-listed devices will be ignored.
#deviceLabels:
# 10.1.1.1:
# lab1: val1
# lab2: val2
# '*':
# lab1: val3
# lab2: val4
# Subscriptions to OpenConfig paths.
subscriptions:
- /Smash/counters/ethIntf
- /Smash/interface/counter/lag/current/counter
- /Sysdb/environment/archer/cooling/status
- /Sysdb/environment/archer/power/status
- /Sysdb/environment/archer/temperature/status
- /Sysdb/hardware/archer/xcvr/status
- /Sysdb/interface/config/eth
# Prometheus metrics configuration.
# If you use named capture groups in the path, they will be extracted into labels with the same name.
# All fields are mandatory.
metrics:
- name: interfaceDescription
path: /Sysdb/interface/config/eth/phy/slice/1/intfConfig/(?P<interface>Ethernet.)/description
help: Description
valuelabel: description
defaultvalue: 15
- name: intfCounter
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)
help: Per-Interface Bytes/Errors/Discards Counters
- name: intfLagCounter
path: /Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)
help: Per-Lag Bytes/Errors/Discards Counters
- name: intfPktCounter
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)
help: Per-Interface Unicast/Multicast/Broadcast Packet Counters
- name: intfLagPktCounter
path: /Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)
help: Per-Lag Unicast/Multicast/Broadcast Packet Counters
- name: intfPfcClassCounter
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)
help: Per-Interface Input/Output PFC Frames Counters
- name: tempSensor
path: /Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/((?:maxT|t)emperature)
help: Temperature and Maximum Temperature
- name: tempSensorAlert
path: /Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/(alertRaisedCount)
help: Temperature Alerts Counter
- name: currentSensor
path: /Sysdb/(environment)/archer/power/status/currentSensor/(?P<sensor>.+)/(current)
help: Current Levels
- name: powerSensor
path: /Sysdb/(environment)/archer/(power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power
help: Input/Output Power Levels
- name: voltageSensor
path: /Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(voltage)
help: Voltage Levels
- name: railCurrentSensor
path: /Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(current)
help: Rail Current Levels
- name: fanSpeed
path: /Sysdb/(environment)/archer/(cooling)/status/(?P<fan>.+)/speed
help: Fan Speed
- name: qsfpModularRxPower
path: /Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)
help: qsfpModularRxPower
- name: qsfpFixedRxPower
path: /Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)
help: qsfpFixedRxPower
- name: sfpModularTemperature
path: /Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/lastDomUpdateTime/(temperature)
help: sfpModularTemperature
- name: sfpFixedTemperature
path: /Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/lastDomUpdateTime/(temperature)
help: sfpFixedTemperature

View File

@ -1,62 +0,0 @@
# Per-device labels. Optional
# Exactly the same set of labels must be specified for each device.
# If device address is *, the labels apply to all devices not listed explicitly.
# If any explicit device is listed below, then you need to specify all devices you're subscribed to,
# or have a wildcard entry. Otherwise, updates from non-listed devices will be ignored.
deviceLabels:
10.1.1.1:
lab1: val1
lab2: val2
'*':
lab1: val3
lab2: val4
# Subscriptions to OpenConfig paths.
subscriptions:
- /Sysdb/environment/cooling/status
- /Sysdb/environment/power/status
- /Sysdb/environment/temperature/status
- /Sysdb/interface/counter/eth/lag
- /Sysdb/interface/counter/eth/slice/phy
- /Sysdb/interface/config
- /Sysdb/interface/config/eth/phy/slice/1/intfConfig
# Prometheus metrics configuration.
# If you use named capture groups in the path, they will be extracted into labels with the same name.
# All fields are mandatory.
metrics:
- name: interfaceDescription
path: Sysdb/interface/config/eth/phy/slice/1/intfConfig/(?P<interface>Ethernet.)/description
help: Description
valuelabel: description
defaultvalue: 15
- name: intfCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(Octets|Errors|Discards))
help: Per-Interface Bytes/Errors/Discards Counters
- name: intfPktCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))Pkt
help: Per-Interface Unicast/Multicast/Broadcast Packet Counters
- name: intfPfcClassCounter
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))PfcClassFrames
help: Per-Interface Input/Output PFC Frames Counters
- name: tempSensor
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
help: Temperature and Maximum Temperature
- name: tempSensorAlert
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/alertRaisedCount
help: Temperature Alerts Counter
- name: currentSensor
path: /Sysdb/environment/power/status/currentSensor/(?P<sensor>.+)/current/value
help: Current Levels
- name: powerSensor
path: /Sysdb/environment/power/status/powerSupply/(?P<sensor>.+)/(?P<direction>(input|output))Power/value
help: Input/Output Power Levels
- name: voltageSensor
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/voltage/value
help: Voltage Levels
- name: railCurrentSensor
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/current/value
help: Rail Current Levels
- name: fanSpeed
path: /Sysdb/environment/cooling/status/fan/(?P<fan>.+)/speed/value
help: Fan Speed

View File

@ -1,21 +0,0 @@
# ocredis
This is a client for the OpenConfig gRPC interface that publishes data to
Redis. Values are stored in JSON. Every update is pushed to Redis twice:
1. as a [hash map](http://redis.io/topics/data-types-intro#hashes) update,
where the path in Redis is the path to the entity or collection (aka
container or list, in YANG speak) and the keys of the hash are the
attributes (leaf names, in YANG speak).
2. as a [`PUBLISH`](http://redis.io/commands/publish) command sent onto
the path to the entity or collection, so that consumers can receive
updates in a streaming fashion from Redis.
## Usage
See the `-help` output, but here's an example that pushes all the temperature
sensors into Redis. Omit the `-subscribe` flag to push _everything_ into Redis.
```
ocredis -subscribe /Sysdb/environment/temperature -addr <switch-hostname>:6042 -redis <redis-hostname>:6379
```
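A minimal sketch of a consumer, assuming the `gopkg.in/redis.v4` client API
that ocredis itself uses (addresses and path are placeholders):
```go
package main

import (
	"fmt"

	redis "gopkg.in/redis.v4"
)

func main() {
	c := redis.NewClient(&redis.Options{Addr: "<redis-hostname>:6379"})
	defer c.Close()
	// ocredis publishes every update on the channel "<switch-address>/<path>".
	pubsub, err := c.Subscribe("<switch-hostname>:6042/Sysdb/environment/temperature")
	if err != nil {
		panic(err)
	}
	defer pubsub.Close()
	for {
		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			panic(err)
		}
		// Each payload is JSON of the form {"kind":"updates"|"deletes","payload":...}.
		fmt.Println(msg.Channel, msg.Payload)
	}
}
```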

View File

@ -1,193 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The ocredis tool is a client for the OpenConfig gRPC interface that
// subscribes to state and pushes it to Redis, using Redis' support for hash
// maps and for publishing events that can be subscribed to.
package main
import (
"context"
"encoding/json"
"flag"
"strings"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/glog"
pb "github.com/openconfig/gnmi/proto/gnmi"
redis "gopkg.in/redis.v4"
)
var clusterMode = flag.Bool("cluster", false, "Whether the redis server is a cluster")
var redisFlag = flag.String("redis", "",
"Comma separated list of Redis servers to push updates to")
var redisPassword = flag.String("redispass", "", "Password of redis server/cluster")
// baseClient allows us to represent both a redis.Client and redis.ClusterClient.
type baseClient interface {
Close() error
ClusterInfo() *redis.StringCmd
HDel(string, ...string) *redis.IntCmd
HMSet(string, map[string]string) *redis.StatusCmd
Ping() *redis.StatusCmd
Pipelined(func(*redis.Pipeline) error) ([]redis.Cmder, error)
Publish(string, string) *redis.IntCmd
}
var client baseClient
func main() {
// gNMI options
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
subscribePaths := flag.String("subscribe", "/", "Comma-separated list of paths to subscribe to")
flag.Parse()
if *redisFlag == "" {
glog.Fatal("Specify the address of the Redis server to write to with -redis")
}
subscriptions := strings.Split(*subscribePaths, ",")
redisAddrs := strings.Split(*redisFlag, ",")
if !*clusterMode && len(redisAddrs) > 1 {
glog.Fatal("Please pass only 1 redis address in noncluster mode or enable cluster mode")
}
if *clusterMode {
client = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: redisAddrs,
Password: *redisPassword,
})
} else {
client = redis.NewClient(&redis.Options{
Addr: *redisFlag,
Password: *redisPassword,
})
}
defer client.Close()
// TODO: Figure out ways to handle being in the wrong mode:
// Connecting to cluster in non cluster mode - we get a MOVED error on the first HMSET
// Connecting to a noncluster in cluster mode - we get stuck forever
_, err := client.Ping().Result()
if err != nil {
glog.Fatal("Failed to connect to Redis: ", err)
}
ctx := gnmi.NewContext(context.Background(), cfg)
gnmiClient, err := gnmi.Dial(cfg)
if err != nil {
glog.Fatal(err)
}
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
subscribeOptions := &gnmi.SubscribeOptions{
Mode: "stream",
StreamMode: "target_defined",
Paths: gnmi.SplitPaths(subscriptions),
}
go gnmi.Subscribe(ctx, gnmiClient, subscribeOptions, respChan, errChan)
for {
select {
case resp := <-respChan:
bufferToRedis(cfg.Addr, resp.GetUpdate())
case err := <-errChan:
glog.Fatal(err)
}
}
}
type redisData struct {
key string
hmset map[string]string
hdel []string
pub map[string]interface{}
}
func bufferToRedis(addr string, notif *pb.Notification) {
if notif == nil {
// possible that this should be ignored silently
glog.Error("Nil notification ignored")
return
}
path := addr + "/" + joinPath(notif.Prefix)
data := &redisData{key: path}
if len(notif.Update) != 0 {
hmset := make(map[string]string, len(notif.Update))
// Updates to publish on the pub/sub.
pub := make(map[string]interface{}, len(notif.Update))
for _, update := range notif.Update {
key := joinPath(update.Path)
value, err := gnmi.ExtractValue(update)
if err != nil {
glog.Fatalf("Failed to extract valid type from %#v", update)
}
pub[key] = value
marshaledValue, err := json.Marshal(value)
if err != nil {
glog.Fatalf("Failed to JSON marshal update %#v", update)
}
hmset[key] = string(marshaledValue)
}
data.hmset = hmset
data.pub = pub
}
if len(notif.Delete) != 0 {
hdel := make([]string, len(notif.Delete))
for i, del := range notif.Delete {
hdel[i] = joinPath(del)
}
data.hdel = hdel
}
pushToRedis(data)
}
func pushToRedis(data *redisData) {
_, err := client.Pipelined(func(pipe *redis.Pipeline) error {
if data.hmset != nil {
if reply := client.HMSet(data.key, data.hmset); reply.Err() != nil {
glog.Fatal("Redis HMSET error: ", reply.Err())
}
redisPublish(data.key, "updates", data.pub)
}
if data.hdel != nil {
if reply := client.HDel(data.key, data.hdel...); reply.Err() != nil {
glog.Fatal("Redis HDEL error: ", reply.Err())
}
redisPublish(data.key, "deletes", data.hdel)
}
return nil
})
if err != nil {
glog.Fatal("Failed to send Pipelined commands: ", err)
}
}
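// redisPublish publishes a JSON message of the form
// {"kind": "updates"|"deletes", "payload": ...} on the channel named by path.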
func redisPublish(path, kind string, payload interface{}) {
js, err := json.Marshal(map[string]interface{}{
"kind": kind,
"payload": payload,
})
if err != nil {
glog.Fatalf("JSON error: %s", err)
}
if reply := client.Publish(path, string(js)); reply.Err() != nil {
glog.Fatal("Redis PUBLISH error: ", reply.Err())
}
}
func joinPath(path *pb.Path) string {
return gnmi.StrPath(path)
}

View File

@ -1,12 +0,0 @@
# ocsplunk
Client for the gRPC OpenConfig service which subscribes to the configuration and
state of a network device and sends it to the Splunk HTTP Event Collector.
## Sample usage
```
ocsplunk -addr 10.0.1.2 -splunkurls https://splunk:8088 -splunktoken 00000000-0000-0000-0000-000000000000
```
![preview](preview.png)

View File

@ -1,126 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"context"
"crypto/tls"
"flag"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/aristanetworks/glog"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/splunk-hec-go"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func exitWithError(s string) {
fmt.Fprintln(os.Stderr, s)
os.Exit(1)
}
func main() {
// gNMI options
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
subscribePaths := flag.String("paths", "/", "Comma-separated list of paths to subscribe to")
// Splunk options
splunkURLs := flag.String("splunkurls", "https://localhost:8088",
"Comma-separated list of URLs of the Splunk servers")
splunkToken := flag.String("splunktoken", "", "Token to connect to the Splunk servers")
splunkIndex := flag.String("splunkindex", "", "Index for the data in Splunk")
flag.Parse()
// gNMI connection
ctx := gnmi.NewContext(context.Background(), cfg)
// Store the address without the port so it can be used as the host in the Splunk event.
addr := cfg.Addr
client, err := gnmi.Dial(cfg)
if err != nil {
glog.Fatal(err)
}
// Splunk connection
urls := strings.Split(*splunkURLs, ",")
cluster := hec.NewCluster(urls, *splunkToken)
cluster.SetHTTPClient(&http.Client{
Transport: &http.Transport{
// TODO: add flags for TLS
TLSClientConfig: &tls.Config{
// TODO: add flag to enable TLS
InsecureSkipVerify: true,
},
},
})
// gNMI subscription
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
defer close(errChan)
paths := strings.Split(*subscribePaths, ",")
subscribeOptions := &gnmi.SubscribeOptions{
Mode: "stream",
StreamMode: "target_defined",
Paths: gnmi.SplitPaths(paths),
}
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
// Forward subscribe responses to Splunk
for {
select {
// We got a subscribe response
case resp := <-respChan:
response := resp.GetResponse()
update, ok := response.(*pb.SubscribeResponse_Update)
if !ok {
continue
}
// Convert the response into a map[string]interface{}
notification, err := gnmi.NotificationToMap(update.Update)
if err != nil {
exitWithError(err.Error())
}
// Build the Splunk event
path := notification["path"].(string)
delete(notification, "path")
timestamp := notification["timestamp"].(int64)
delete(notification, "timestamp")
// Should this be configurable?
sourceType := "openconfig"
event := &hec.Event{
Host: &addr,
Index: splunkIndex,
Source: &path,
SourceType: &sourceType,
Event: notification,
}
event.SetTime(time.Unix(timestamp/1e9, timestamp%1e9))
// Write the event to Splunk
if err := cluster.WriteEvent(event); err != nil {
exitWithError("failed to write event: " + err.Error())
}
// We got an error
case err := <-errChan:
exitWithError(err.Error())
}
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 434 KiB

View File

@ -1,56 +0,0 @@
# octsdb
This is a client for the OpenConfig gRPC interface that pushes telemetry to
OpenTSDB. Non-numerical data isn't supported by OpenTSDB and is silently
dropped.
This tool requires a config file to specify how to map the path of the
notifications coming out of the OpenConfig gRPC interface onto OpenTSDB
metric names, and how to extract tags from the path.
## Getting Started
To begin, a list of subscriptions is required (excerpt from `sampleconfig.json`):
```json
"subscriptions": [
"/Sysdb/interface/counter/eth/lag",
"/Sysdb/interface/counter/eth/slice/phy",
"/Sysdb/environment/temperature/status",
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
"/Sysdb/hardware/xcvr/status/all/xcvrStatus"
],
...
```
Note that subscriptions should not end with a trailing `/` as that will cause
the subscription to fail.
Afterwards, the metrics are defined (excerpt from `sampleconfig.json`):
```json
"metrics": {
"tempSensor": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)"
},
...
}
```
In the metrics path, unnamed matched groups are used to make up the metric name, and named matched groups
are used to extract optional tags. Note that unnamed groups are required, otherwise the metric
name will be empty and the update will be silently dropped.
For example, using the above metrics path applied to an update for the path
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature`
will lead to the metric name `environment.temperature` and tags `sensor=TempSensor1`.
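A minimal standalone sketch of this extraction using Go's `regexp` package
(illustrative only; the names here are not the tool's own code):
```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Unnamed groups build the metric name; named groups become tags.
	re := regexp.MustCompile(
		`/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)`)
	path := "/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature"
	m := re.FindStringSubmatch(path)
	if m == nil {
		return // no match: the update would be silently dropped
	}
	var name []string
	tags := make(map[string]string)
	for i, group := range re.SubexpNames() {
		if i == 0 {
			continue // m[0] is the whole match, not a group
		}
		if group == "" {
			name = append(name, m[i])
		} else {
			tags[group] = m[i]
		}
	}
	fmt.Println(strings.ToLower(strings.Join(name, ".")), tags)
	// Prints: environment.temperature map[sensor:TempSensor1]
}
```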
## Usage
See the `-help` output, but here's an example to push all the metrics defined
in the sample config file:
```
octsdb -addr <switch-hostname>:6042 -config sampleconfig.json -text | nc <tsd-hostname> 4242
```
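With `-text`, each datapoint is printed as an OpenTSDB telnet-style put line,
along these lines (illustrative values):
```
put eos.tempsensor.environment.temperature 1466133010 42.5 host=sw1 sensor=TempSensor1
```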

View File

@ -1,93 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"regexp"
"strings"
)
// Config is the representation of octsdb's JSON config file.
type Config struct {
// Prefixes to subscribe to.
Subscriptions []string
// MetricPrefix, if set, is used to prefix all the metric names.
MetricPrefix string
// Metrics to collect and how to munge them.
Metrics map[string]*Metric
}
// A Metric to collect and how to massage it into an OpenTSDB put.
type Metric struct {
// Path is a regexp to match on the Update's full path.
// The regexp must be a prefix match.
// The regexp can define named capture groups to use as tags.
Path string
// Path compiled as a regexp.
re *regexp.Regexp
// Additional tags to add to this metric.
Tags map[string]string
}
func loadConfig(path string) (*Config, error) {
cfg, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("Failed to load config: %v", err)
}
config := new(Config)
err = json.Unmarshal(cfg, config)
if err != nil {
return nil, fmt.Errorf("Failed to parse config: %v", err)
}
for _, metric := range config.Metrics {
metric.re = regexp.MustCompile(metric.Path)
}
return config, nil
}
// Match applies this config to the given OpenConfig path.
// If the path doesn't match anything in the config, an empty string
// is returned as the metric name.
func (c *Config) Match(path string) (metricName string, tags map[string]string) {
tags = make(map[string]string)
for name, metric := range c.Metrics {
found := metric.re.FindStringSubmatch(path)
if found == nil {
continue
}
metricName = name
for i, name := range metric.re.SubexpNames() {
if i == 0 {
continue
} else if name == "" {
if metricName != "" {
metricName += "/"
}
metricName += found[i]
} else {
tags[name] = found[i]
}
}
for tag, value := range metric.Tags {
tags[tag] = value
}
break
}
if metricName != "" {
metricName = strings.ToLower(strings.Replace(metricName, "/", ".", -1))
if c.MetricPrefix != "" {
metricName = c.MetricPrefix + "." + metricName
}
}
return
}

View File

@ -1,76 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"testing"
"github.com/aristanetworks/goarista/test"
)
func TestConfig(t *testing.T) {
cfg, err := loadConfig("/nonexistent.json")
if err == nil {
t.Fatal("Managed to load a nonexistent config!")
}
cfg, err = loadConfig("sampleconfig.json")
if err != nil {
t.Fatal("Failed to load config:", err)
}
testcases := []struct {
path string
metric string
tags map[string]string
}{{
path: "/Sysdb/environment/cooling/status/fan/Fan1/1/speed/value",
metric: "eos.fanspeed.environment.fan.speed",
tags: map[string]string{"fan": "Fan1/1"},
}, {
path: "/Sysdb/environment/power/status/powerSupply/PowerSupply2/outputPower/value",
metric: "eos.powersensor.environment.power.output",
tags: map[string]string{"sensor": "PowerSupply2"},
}, {
path: "/Sysdb/environment/power/status/voltageSensor/VoltageSensor23/voltage/value",
metric: "eos.voltagesensor.environment.voltage",
tags: map[string]string{"sensor": "VoltageSensor23"},
}, {
path: "/Sysdb/environment/power/status/currentSensor/CurrentSensorP2/1/current/value",
metric: "eos.currentsensor.environment.current",
tags: map[string]string{"sensor": "CurrentSensorP2/1"},
}, {
path: "/Sysdb/environment/temperature/status/tempSensor/" +
"TempSensorP2/1/maxTemperature/value",
metric: "eos.tempsensor.environment.maxtemperature",
tags: map[string]string{"sensor": "TempSensorP2/1"},
}, {
path: "/Sysdb/interface/counter/eth/lag/intfCounterDir/" +
"Port-Channel201/intfCounter/current/statistics/outUcastPkts",
metric: "eos.intfpktcounter.interface.pkt",
tags: map[string]string{"intf": "Port-Channel201", "direction": "out", "type": "Ucast"},
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/current/statistics/inUcastPkts",
metric: "eos.intfpktcounter.interface.pkt",
tags: map[string]string{"intf": "Ethernet42", "direction": "in", "type": "Ucast"},
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/lastClear/statistics/inErrors",
}, {
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
"Ethernet42/intfCounter/current/ethStatistics/outPfcClassFrames",
metric: "eos.intfpfcclasscounter.interface.pfcclassframes",
tags: map[string]string{"intf": "Ethernet42", "direction": "out"},
}}
for i, tcase := range testcases {
actualMetric, actualTags := cfg.Match(tcase.path)
if actualMetric != tcase.metric {
t.Errorf("#%d expected metric %q but got %q", i, tcase.metric, actualMetric)
}
if d := test.Diff(tcase.tags, actualTags); actualMetric != "" && d != "" {
t.Errorf("#%d expected tags %q but got %q: %s", i, tcase.tags, actualTags, d)
}
}
}

View File

@ -1,244 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// The octsdb tool pushes OpenConfig telemetry to OpenTSDB.
package main
import (
"context"
"encoding/json"
"flag"
"math"
"os"
"strconv"
"strings"
"time"
"github.com/aristanetworks/goarista/gnmi"
"github.com/aristanetworks/glog"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func main() {
// gNMI options
cfg := &gnmi.Config{}
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
// Program options
subscribePaths := flag.String("paths", "/", "Comma-separated list of paths to subscribe to")
tsdbFlag := flag.String("tsdb", "",
"Address of the OpenTSDB server to push telemetry to")
textFlag := flag.Bool("text", false,
"Print the output as simple text")
configFlag := flag.String("config", "",
"Config to turn OpenConfig telemetry into OpenTSDB put requests")
isUDPServerFlag := flag.Bool("isudpserver", false,
"Set to true to run as a UDP server relaying to OpenTSDB over TCP.")
udpAddrFlag := flag.String("udpaddr", "",
"Address of the UDP server to connect to/serve on.")
parityFlag := flag.Int("parityshards", 0,
"Number of parity shards for the Reed Solomon Erasure Coding used for UDP."+
" Clients and servers should have the same number.")
udpTimeoutFlag := flag.Duration("udptimeout", 2*time.Second,
"Timeout for each write on the UDP connection")
flag.Parse()
if !(*tsdbFlag != "" || *textFlag || *udpAddrFlag != "") {
glog.Fatal("Specify the address of the OpenTSDB server to write to with -tsdb")
} else if *configFlag == "" {
glog.Fatal("Specify a JSON configuration file with -config")
}
config, err := loadConfig(*configFlag)
if err != nil {
glog.Fatal(err)
}
// Ignore the default "subscribe-to-everything" subscription of the
// -paths flag.
subscriptions := strings.Split(*subscribePaths, ",")
if subscriptions[0] == "" {
subscriptions = subscriptions[1:]
}
// Add the subscriptions from the config file.
subscriptions = append(subscriptions, config.Subscriptions...)
// Run a UDP server that forwards messages to OpenTSDB via Telnet (TCP)
if *isUDPServerFlag {
if *udpAddrFlag == "" {
glog.Fatal("Specify the address for the UDP server to listen on with -udpaddr")
}
server, err := newUDPServer(*udpAddrFlag, *tsdbFlag, *parityFlag)
if err != nil {
glog.Fatal("Failed to create UDP server: ", err)
}
glog.Fatal(server.Run())
}
var c OpenTSDBConn
if *textFlag {
c = newTextDumper()
} else if *udpAddrFlag != "" {
c = newUDPClient(*udpAddrFlag, *parityFlag, *udpTimeoutFlag)
} else {
// TODO: support HTTP(S).
c = newTelnetClient(*tsdbFlag)
}
ctx := gnmi.NewContext(context.Background(), cfg)
client, err := gnmi.Dial(cfg)
if err != nil {
glog.Fatal(err)
}
respChan := make(chan *pb.SubscribeResponse)
errChan := make(chan error)
subscribeOptions := &gnmi.SubscribeOptions{
Mode: "stream",
StreamMode: "target_defined",
Paths: gnmi.SplitPaths(subscriptions),
}
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
for {
select {
case resp := <-respChan:
pushToOpenTSDB(cfg.Addr, c, config, resp.GetUpdate())
case err := <-errChan:
glog.Fatal(err)
}
}
}
func pushToOpenTSDB(addr string, conn OpenTSDBConn, config *Config, notif *pb.Notification) {
if notif == nil {
glog.Error("Nil notification ignored")
return
}
if notif.Timestamp <= 0 {
glog.Fatalf("Invalid timestamp %d in %s", notif.Timestamp, notif)
}
host := addr[:strings.IndexRune(addr, ':')]
if host == "localhost" {
// TODO: On Linux this reads /proc/sys/kernel/hostname each time,
// which isn't the most efficient, but at least we don't have to
// deal with detecting hostname changes.
host, _ = os.Hostname()
if host == "" {
glog.Info("could not figure out localhost's hostname")
return
}
}
prefix := gnmi.StrPath(notif.Prefix)
for _, update := range notif.Update {
value := parseValue(update)
if value == nil {
continue
}
path := prefix + gnmi.StrPath(update.Path)
metricName, tags := config.Match(path)
if metricName == "" {
glog.V(8).Infof("Ignoring unmatched update at %s with value %+v", path, value)
continue
}
tags["host"] = host
for i, v := range value {
if len(value) > 1 {
tags["index"] = strconv.Itoa(i)
}
err := conn.Put(&DataPoint{
Metric: metricName,
Timestamp: uint64(notif.Timestamp),
Value: v,
Tags: tags,
})
if err != nil {
glog.Info("Failed to put datapoint: ", err)
}
}
}
}
// parseValue returns either an integer/floating point value of the given update, or if
// the value is a slice of integers/floating point values. If the value is neither of these
// or if any element in the slice is non numerical, parseValue returns nil.
func parseValue(update *pb.Update) []interface{} {
value, err := gnmi.ExtractValue(update)
if err != nil {
glog.Fatalf("Malformed JSON update %q in %s", update.Val.GetJsonVal(), update)
}
switch value := value.(type) {
case int64:
return []interface{}{value}
case uint64:
return []interface{}{value}
case float32:
return []interface{}{value}
case *pb.Decimal64:
val := gnmi.DecimalToFloat(value)
if math.IsInf(val, 0) || math.IsNaN(val) {
return nil
}
return []interface{}{val}
case json.Number:
return []interface{}{parseNumber(value, update)}
case []interface{}:
for i, val := range value {
switch val := val.(type) {
case int64:
value[i] = val
case uint64:
value[i] = val
case float32:
value[i] = val
case *pb.Decimal64:
v := gnmi.DecimalToFloat(val)
if math.IsInf(v, 0) || math.IsNaN(v) {
value[i] = nil
continue
}
value[i] = v
case json.Number:
value[i] = parseNumber(val, update)
default:
// If any value is not a number, skip it.
glog.Infof("Element %d: %v is %T, not json.Number", i, val, val)
continue
}
}
return value
case map[string]interface{}:
// Special case for simple value types that just have a "value"
// attribute (common case).
if val, ok := value["value"].(json.Number); ok && len(value) == 1 {
return []interface{}{parseNumber(val, update)}
}
default:
glog.V(9).Infof("Ignoring non-numeric or non-numeric slice value in %s", update)
}
return nil
}
// Convert our json.Number to either an int64, uint64, or float64.
func parseNumber(num json.Number, update *pb.Update) interface{} {
var value interface{}
var err error
if value, err = num.Int64(); err != nil {
// num is either a large unsigned integer or a floating point.
if strings.Contains(err.Error(), "value out of range") { // Sigh.
value, err = strconv.ParseUint(num.String(), 10, 64)
} else {
value, err = num.Float64()
if err != nil {
glog.Fatalf("Malformed JSON number %q in %s", num, update)
}
}
}
return value
}

View File

@ -1,46 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"math"
"testing"
"github.com/aristanetworks/goarista/test"
pb "github.com/openconfig/gnmi/proto/gnmi"
)
func TestParseValue(t *testing.T) { // Because parsing JSON sucks.
testcases := []struct {
input string
expected interface{}
}{
{"42", []interface{}{int64(42)}},
{"-42", []interface{}{int64(-42)}},
{"42.42", []interface{}{float64(42.42)}},
{"-42.42", []interface{}{float64(-42.42)}},
{`"foo"`, []interface{}(nil)},
{"9223372036854775807", []interface{}{int64(math.MaxInt64)}},
{"-9223372036854775808", []interface{}{int64(math.MinInt64)}},
{"9223372036854775808", []interface{}{uint64(math.MaxInt64) + 1}},
{"[1,3,5,7,9]", []interface{}{int64(1), int64(3), int64(5), int64(7), int64(9)}},
{"[1,9223372036854775808,0,-9223372036854775808]", []interface{}{
int64(1),
uint64(math.MaxInt64) + 1,
int64(0),
int64(math.MinInt64)},
},
}
for i, tcase := range testcases {
actual := parseValue(&pb.Update{
Val: &pb.TypedValue{
Value: &pb.TypedValue_JsonVal{JsonVal: []byte(tcase.input)},
},
})
if d := test.Diff(tcase.expected, actual); d != "" {
t.Errorf("#%d: %s: %#v vs %#v", i, d, tcase.expected, actual)
}
}
}

View File

@ -1,54 +0,0 @@
{
"comment": "This is a sample configuration for EOS versions below 4.20",
"subscriptions": [
"/Sysdb/interface/counter/eth/lag",
"/Sysdb/interface/counter/eth/slice/phy",
"/Sysdb/environment/temperature/status",
"/Sysdb/environment/cooling/status",
"/Sysdb/environment/power/status",
"/Sysdb/hardware/xcvr/status/all/xcvrStatus"
],
"metricPrefix": "eos",
"metrics": {
"intfCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
},
"intfPktCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
},
"intfPfcClassCounter": {
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)"
},
"tempSensor": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)"
},
"tempSensorAlert": {
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/(alertRaisedCount)"
},
"currentSensor": {
"path": "/Sysdb/(environment)/power/status/currentSensor/(?P<sensor>.+)/(current)"
},
"powerSensor": {
"path": "/Sysdb/(environment/power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power"
},
"voltageSensor": {
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(voltage)"
},
"railCurrentSensor": {
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(current)"
},
"fanSpeed": {
"path": "/Sysdb/(environment)/cooling/status/(fan)/(?P<fan>.+)/(speed)"
},
"qsfpRxPower": {
"path": "/Sysdb/hardware/(xcvr)/status/all/xcvrStatus/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
},
"sfpTemperature": {
"path": "/Sysdb/hardware/(xcvr)/status/all/xcvrStatus/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
}
}
}

View File

@ -1,66 +0,0 @@
{
"comment": "This is a sample configuration for EOS versions above 4.20",
"subscriptions": [
"/Smash/counters/ethIntf",
"/Smash/interface/counter/lag/current/counter",
"/Sysdb/environment/archer/cooling/status",
"/Sysdb/environment/archer/power/status",
"/Sysdb/environment/archer/temperature/status",
"/Sysdb/hardware/archer/xcvr/status"
],
"metricPrefix": "eos",
"metrics": {
"intfCounter": {
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
},
"intfLagCounter": {
"path": "/Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
},
"intfPktCounter": {
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
},
"intfLagPktCounter": {
"path": "/Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
},
"intfPfcClassCounter": {
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)"
},
"tempSensor": {
"path": "/Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/((?:maxT|t)emperature)"
},
"tempSensorAlert": {
"path": "/Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/(alertRaisedCount)"
},
"currentSensor": {
"path": "/Sysdb/(environment)/archer/power/status/currentSensor/(?P<sensor>.+)/(current)"
},
"powerSensor": {
"path": "/Sysdb/(environment)/archer/(power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power"
},
"voltageSensor": {
"path": "/Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(voltage)"
},
"railCurrentSensor": {
"path": "/Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(current)"
},
"fanSpeed": {
"path": "/Sysdb/(environment)/archer/(cooling)/status/(?P<fan>.+)/speed"
},
"qsfpModularRxPower": {
"path": "/Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
},
"qsfpFixedRxPower": {
"path": "/Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
},
"sfpModularTemperature": {
"path": "/Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
},
"sfpFixedTemperature": {
"path": "/Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
}
}
}

View File

@ -1,65 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"bytes"
"net"
"github.com/aristanetworks/glog"
)
type telnetClient struct {
addr string
conn net.Conn
}
func newTelnetClient(addr string) OpenTSDBConn {
return &telnetClient{
addr: addr,
}
}
func readErrors(conn net.Conn) {
var buf [4096]byte
for {
// TODO: We should add a buffer to read line-by-line properly instead
// of using a fixed-size buffer and splitting on newlines manually.
n, err := conn.Read(buf[:])
if n == 0 {
return
} else if n > 0 {
for _, line := range bytes.Split(buf[:n], []byte{'\n'}) {
if s := string(line); s != "" {
glog.Info("tsd replied: ", s)
}
}
}
if err != nil {
return
}
}
}
func (c *telnetClient) Put(d *DataPoint) error {
return c.PutBytes([]byte(d.String()))
}
func (c *telnetClient) PutBytes(d []byte) error {
var err error
if c.conn == nil {
c.conn, err = net.Dial("tcp", c.addr)
if err != nil {
return err
}
go readErrors(c.conn)
}
_, err = c.conn.Write(d)
if err != nil {
c.conn.Close()
c.conn = nil
}
return err
}

View File

@ -1,16 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import "fmt"
type textDumper struct{}
func newTextDumper() OpenTSDBConn {
return textDumper{}
}
func (t textDumper) Put(d *DataPoint) error {
// Write to stdout so that -text output can be piped (e.g. into nc).
fmt.Print(d.String())
return nil
}

View File

@ -1,37 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import "fmt"
// DataPoint for OpenTSDB to store.
type DataPoint struct {
// Metric name.
Metric string `json:"metric"`
// UNIX timestamp in nanoseconds.
Timestamp uint64 `json:"timestamp"`
// Value of the data point (integer or floating point).
Value interface{} `json:"value"`
// Tags. The host is automatically populated by the OpenTSDBConn.
Tags map[string]string `json:"tags"`
}
func (d *DataPoint) String() string {
var tags string
if len(d.Tags) != 0 {
for tag, value := range d.Tags {
tags += " " + tag + "=" + value
}
}
return fmt.Sprintf("put %s %d %v%s\n", d.Metric, d.Timestamp/1e9, d.Value, tags)
}
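// For example, a DataPoint with Metric "eos.fanspeed", Timestamp
// 1466133010000000000 (nanoseconds), Value 42 and Tags {"fan": "Fan1"}
// renders as:
// put eos.fanspeed 1466133010 42 fan=Fan1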
// OpenTSDBConn is a managed connection to an OpenTSDB instance (or cluster).
type OpenTSDBConn interface {
Put(d *DataPoint) error
}

View File

@ -1,104 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"math/rand"
"time"
"github.com/aristanetworks/glog"
kcp "github.com/xtaci/kcp-go"
)
type udpClient struct {
addr string
conn *kcp.UDPSession
parity int
timeout time.Duration
}
func newUDPClient(addr string, parity int, timeout time.Duration) OpenTSDBConn {
return &udpClient{
addr: addr,
parity: parity,
timeout: timeout,
}
}
func (c *udpClient) Put(d *DataPoint) error {
var err error
if c.conn == nil {
// Prevent a bunch of clients all disconnecting and attempting to reconnect
// at nearly the same time.
time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
c.conn, err = kcp.DialWithOptions(c.addr, nil, 10, c.parity)
if err != nil {
return err
}
c.conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
}
dStr := d.String()
glog.V(3).Info(dStr)
c.conn.SetWriteDeadline(time.Now().Add(c.timeout))
_, err = c.conn.Write([]byte(dStr))
if err != nil {
c.conn.Close()
c.conn = nil
}
return err
}
type udpServer struct {
lis *kcp.Listener
telnet *telnetClient
}
func newUDPServer(udpAddr, tsdbAddr string, parity int) (*udpServer, error) {
lis, err := kcp.ListenWithOptions(udpAddr, nil, 10, parity)
if err != nil {
return nil, err
}
return &udpServer{
lis: lis,
telnet: newTelnetClient(tsdbAddr).(*telnetClient),
}, nil
}
func (c *udpServer) Run() error {
for {
conn, err := c.lis.AcceptKCP()
if err != nil {
return err
}
conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
if glog.V(3) {
glog.Infof("New connection from %s", conn.RemoteAddr())
}
go func() {
defer conn.Close()
var buf [4096]byte
for {
n, err := conn.Read(buf[:])
if err != nil {
if n != 0 { // Not EOF
glog.Error(err)
}
return
}
if glog.V(3) {
glog.Info(string(buf[:n]))
}
err = c.telnet.PutBytes(buf[:n])
if err != nil {
glog.Error(err)
return
}
}
}()
}
}

View File

@ -1,3 +0,0 @@
# openconfigbeat
The code for `openconfigbeat` lives at [aristanetworks/openconfigbeat](https://github.com/aristanetworks/openconfigbeat).

View File

@ -1,517 +0,0 @@
// Copyright (c) 2018 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// test2influxdb writes results from 'go test -json' to an influxdb
// database.
//
// Example usage:
//
// go test -json | test2influxdb [options...]
//
// Points are written to influxdb with tags:
//
// package
// type "package" for a package result; "test" for a test result
// Additional tags set by -tags flag
//
// And fields:
//
test string // omitted for whole package results
// elapsed float64 // in seconds
// pass float64 // 1 for PASS, 0 for FAIL
// Additional fields set by -fields flag
//
// "test" is a field instead of a tag to reduce cardinality of data.
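//
// For example (illustrative), a passing test in package pkg/foo is written
// to the default "result" measurement as the point:
//
// result,package=pkg/foo,type=test test="TestBar",elapsed=0.013,pass=1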
//
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/aristanetworks/glog"
client "github.com/influxdata/influxdb1-client/v2"
"golang.org/x/tools/benchmark/parse"
)
const (
// Benchmark field names
fieldNsPerOp = "nsPerOp"
fieldAllocedBytesPerOp = "allocedBytesPerOp"
fieldAllocsPerOp = "allocsPerOp"
fieldMBPerS = "MBPerSec"
)
type tag struct {
key string
value string
}
type tags []tag
func (ts *tags) String() string {
s := make([]string, len(*ts))
for i, t := range *ts {
s[i] = t.key + "=" + t.value
}
return strings.Join(s, ",")
}
func (ts *tags) Set(s string) error {
for _, fieldString := range strings.Split(s, ",") {
kv := strings.Split(fieldString, "=")
if len(kv) != 2 {
return fmt.Errorf("invalid tag, expecting one '=': %q", fieldString)
}
key := strings.TrimSpace(kv[0])
if key == "" {
return fmt.Errorf("invalid tag key %q in %q", key, fieldString)
}
val := strings.TrimSpace(kv[1])
if val == "" {
return fmt.Errorf("invalid tag value %q in %q", val, fieldString)
}
*ts = append(*ts, tag{key: key, value: val})
}
return nil
}
type field struct {
key string
value interface{}
}
type fields []field
func (fs *fields) String() string {
s := make([]string, len(*fs))
for i, f := range *fs {
var valString string
switch v := f.value.(type) {
case bool:
valString = strconv.FormatBool(v)
case float64:
valString = strconv.FormatFloat(v, 'f', -1, 64)
case int64:
valString = strconv.FormatInt(v, 10) + "i"
case string:
valString = v
}
s[i] = f.key + "=" + valString
}
return strings.Join(s, ",")
}
func (fs *fields) Set(s string) error {
for _, fieldString := range strings.Split(s, ",") {
kv := strings.Split(fieldString, "=")
if len(kv) != 2 {
return fmt.Errorf("invalid field, expecting one '=': %q", fieldString)
}
key := strings.TrimSpace(kv[0])
if key == "" {
return fmt.Errorf("invalid field key %q in %q", key, fieldString)
}
val := strings.TrimSpace(kv[1])
if val == "" {
return fmt.Errorf("invalid field value %q in %q", val, fieldString)
}
var value interface{}
var err error
if value, err = strconv.ParseBool(val); err == nil {
// It's a bool
} else if value, err = strconv.ParseFloat(val, 64); err == nil {
// It's a float64
} else if value, err = strconv.ParseInt(val[:len(val)-1], 0, 64); err == nil &&
val[len(val)-1] == 'i' {
// ints are suffixed with an "i"
} else {
value = val
}
*fs = append(*fs, field{key: key, value: value})
}
return nil
}
var (
flagAddr = flag.String("addr", "http://localhost:8086", "address of the influxdb database")
flagDB = flag.String("db", "gotest", "use `database` in influxdb")
flagMeasurement = flag.String("m", "result", "`measurement` used in influxdb database")
flagBenchOnly = flag.Bool("bench", false, "if true, parses and stores benchmark "+
"output only while ignoring test results")
flagTags tags
flagFields fields
)
type duplicateTestsErr map[string][]string // package to tests
func (dte duplicateTestsErr) Error() string {
var b bytes.Buffer
if _, err := b.WriteString("duplicate tests found:"); err != nil {
panic(err)
}
for pkg, tests := range dte {
if _, err := b.WriteString(
fmt.Sprintf("\n\t%s: %s", pkg, strings.Join(tests, " ")),
); err != nil {
panic(err)
}
}
return b.String()
}
func init() {
flag.Var(&flagTags, "tags", "set additional `tags`. Ex: name=alice,food=pasta")
flag.Var(&flagFields, "fields", "set additional `fields`. Ex: id=1234i,long=34.123,lat=72.234")
}
func main() {
flag.Parse()
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: *flagAddr,
})
if err != nil {
glog.Fatal(err)
}
if err := run(c, os.Stdin); err != nil {
glog.Fatal(err)
}
}
func run(c client.Client, r io.Reader) error {
batch, err := client.NewBatchPoints(client.BatchPointsConfig{Database: *flagDB})
if err != nil {
return err
}
var parseErr error
if *flagBenchOnly {
parseErr = parseBenchmarkOutput(r, batch)
} else {
parseErr = parseTestOutput(r, batch)
}
// Partial results can still be published with certain parsing errors like
// duplicate test names.
// The process still exits with a non-zero code in this case.
switch parseErr.(type) {
case nil, duplicateTestsErr:
if err := c.Write(batch); err != nil {
return err
}
glog.Infof("wrote %d data points", len(batch.Points()))
}
return parseErr
}
// See https://golang.org/cmd/test2json/ for a description of 'go test
// -json' output
type testEvent struct {
Time time.Time // encodes as an RFC3339-format string
Action string
Package string
Test string
Elapsed float64 // seconds
Output string
}
func createTags(e *testEvent) map[string]string {
tags := make(map[string]string, len(flagTags)+2)
for _, t := range flagTags {
tags[t.key] = t.value
}
resultType := "test"
if e.Test == "" {
resultType = "package"
}
tags["package"] = e.Package
tags["type"] = resultType
return tags
}
func createFields(e *testEvent) map[string]interface{} {
fields := make(map[string]interface{}, len(flagFields)+3)
for _, f := range flagFields {
fields[f.key] = f.value
}
// Use a float64 instead of a bool to be able to SUM test
// successes in influxdb.
var pass float64
if e.Action == "pass" {
pass = 1
}
fields["pass"] = pass
fields["elapsed"] = e.Elapsed
if e.Test != "" {
fields["test"] = e.Test
}
return fields
}
func parseTestOutput(r io.Reader, batch client.BatchPoints) error {
// pkgs holds packages seen in r. Unfortunately, if a test panics,
// then there is no "fail" result from a package. To detect these
// kind of failures, keep track of all the packages that never had
// a "pass" or "fail".
//
// The last seen timestamp is stored with the package, so that
// package result measurement written to influxdb can be later
// than any test result for that package.
pkgs := make(map[string]time.Time)
d := json.NewDecoder(r)
for {
e := &testEvent{}
if err := d.Decode(e); err != nil {
if err != io.EOF {
return err
}
break
}
switch e.Action {
case "pass", "fail":
default:
continue
}
if e.Test == "" {
// A package has completed.
delete(pkgs, e.Package)
} else {
pkgs[e.Package] = e.Time
}
point, err := client.NewPoint(
*flagMeasurement,
createTags(e),
createFields(e),
e.Time,
)
if err != nil {
return err
}
batch.AddPoint(point)
}
for pkg, t := range pkgs {
pkgFail := &testEvent{
Action: "fail",
Package: pkg,
}
point, err := client.NewPoint(
*flagMeasurement,
createTags(pkgFail),
createFields(pkgFail),
// Fake a timestamp that is later than anything that
// occurred for this package
t.Add(time.Millisecond),
)
if err != nil {
return err
}
batch.AddPoint(point)
}
return nil
}
func createBenchmarkTags(pkg string, b *parse.Benchmark) map[string]string {
tags := make(map[string]string, len(flagTags)+2)
for _, t := range flagTags {
tags[t.key] = t.value
}
tags["package"] = pkg
tags["benchmark"] = b.Name
return tags
}
func createBenchmarkFields(b *parse.Benchmark) map[string]interface{} {
fields := make(map[string]interface{}, len(flagFields)+4)
for _, f := range flagFields {
fields[f.key] = f.value
}
if b.Measured&parse.NsPerOp != 0 {
fields[fieldNsPerOp] = b.NsPerOp
}
if b.Measured&parse.AllocedBytesPerOp != 0 {
fields[fieldAllocedBytesPerOp] = float64(b.AllocedBytesPerOp)
}
if b.Measured&parse.AllocsPerOp != 0 {
fields[fieldAllocsPerOp] = float64(b.AllocsPerOp)
}
if b.Measured&parse.MBPerS != 0 {
fields[fieldMBPerS] = b.MBPerS
}
return fields
}
func parseBenchmarkOutput(r io.Reader, batch client.BatchPoints) error {
// Unfortunately, test2json is not very reliable when it comes to benchmarks. At least
// the following issues exist:
//
// - It doesn't guarantee a "pass" action for each successful benchmark test
// - It might misreport the name of a benchmark (i.e. "Test" field)
// See https://github.com/golang/go/issues/27764.
// This happens for example when a benchmark panics: it might use the name
// of the preceding benchmark from the same package that ran
//
// The main useful element of the json data is that it separates the output by package,
// which complements the features in https://godoc.org/golang.org/x/tools/benchmark/parse
// Non-benchmark output from libraries like glog can interfere with benchmark result
// parsing. filterOutputLine tries to filter out this extraneous info.
// It returns a tuple with the output to parse and the name of the benchmark
// if it is in the testEvent.
filterOutputLine := func(e *testEvent) (string, string) {
// The benchmark name is in the output of a separate test event.
// It may be suffixed with non-benchmark-related logged output.
// So if e.Output is
// "BenchmarkFoo \tIrrelevant output"
// then here we return
// "BenchmarkFoo \t"
if strings.HasPrefix(e.Output, "Benchmark") {
if split := strings.SplitAfterN(e.Output, "\t", 2); len(split) == 2 {
// Filter out output like "Benchmarking foo\t"
if words := strings.Fields(split[0]); len(words) == 1 {
return split[0], words[0]
}
}
}
if strings.Contains(e.Output, "ns/op\t") {
return e.Output, ""
}
if strings.Contains(e.Output, "B/op\t") {
return e.Output, ""
}
if strings.Contains(e.Output, "allocs/op\t") {
return e.Output, ""
}
if strings.Contains(e.Output, "MB/s\t") {
return e.Output, ""
}
return "", ""
}
// Extract output per package.
type pkgOutput struct {
output bytes.Buffer
timestamps map[string]time.Time
}
outputByPkg := make(map[string]*pkgOutput)
d := json.NewDecoder(r)
for {
e := &testEvent{}
if err := d.Decode(e); err != nil {
if err != io.EOF {
return err
}
break
}
if e.Package == "" {
return fmt.Errorf("empty package name for event %v", e)
}
if e.Time.IsZero() {
return fmt.Errorf("zero timestamp for event %v", e)
}
line, bname := filterOutputLine(e)
if line == "" {
continue
}
po, ok := outputByPkg[e.Package]
if !ok {
po = &pkgOutput{timestamps: make(map[string]time.Time)}
outputByPkg[e.Package] = po
}
po.output.WriteString(line)
if bname != "" {
po.timestamps[bname] = e.Time
}
}
// Extract benchmark info from output
type pkgBenchmarks struct {
benchmarks []*parse.Benchmark
timestamps map[string]time.Time
}
benchmarksPerPkg := make(map[string]*pkgBenchmarks)
dups := make(duplicateTestsErr)
for pkg, po := range outputByPkg {
glog.V(5).Infof("Package %s output:\n%s", pkg, &po.output)
set, err := parse.ParseSet(&po.output)
if err != nil {
return fmt.Errorf("error parsing package %s: %s", pkg, err)
}
for name, benchmarks := range set {
switch len(benchmarks) {
case 0:
case 1:
pb, ok := benchmarksPerPkg[pkg]
if !ok {
pb = &pkgBenchmarks{timestamps: po.timestamps}
benchmarksPerPkg[pkg] = pb
}
pb.benchmarks = append(pb.benchmarks, benchmarks[0])
default:
dups[pkg] = append(dups[pkg], name)
}
}
}
// Add a point per benchmark
for pkg, pb := range benchmarksPerPkg {
for _, bm := range pb.benchmarks {
t, ok := pb.timestamps[bm.Name]
if !ok {
return fmt.Errorf("implementation error: no timestamp for benchmark %s "+
"in package %s", bm.Name, pkg)
}
tags := createBenchmarkTags(pkg, bm)
fields := createBenchmarkFields(bm)
point, err := client.NewPoint(
*flagMeasurement,
tags,
fields,
t,
)
if err != nil {
return err
}
batch.AddPoint(point)
glog.V(5).Infof("point: %s", point)
}
}
glog.Infof("Parsed %d benchmarks from %d packages",
len(batch.Points()), len(benchmarksPerPkg))
if len(dups) > 0 {
return dups
}
return nil
}

View File

@ -1,277 +0,0 @@
// Copyright (c) 2018 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
package main
import (
"os"
"testing"
"time"
"github.com/aristanetworks/goarista/test"
client "github.com/influxdata/influxdb1-client/v2"
)
type mockedConn struct {
bp client.BatchPoints
}
func (m *mockedConn) Ping(timeout time.Duration) (time.Duration, string, error) {
return time.Duration(0), "", nil
}
func (m *mockedConn) Write(bp client.BatchPoints) error {
m.bp = bp
return nil
}
func (m *mockedConn) Query(q client.Query) (*client.Response, error) {
return nil, nil
}
func (m *mockedConn) QueryAsChunk(q client.Query) (*client.ChunkedResponse, error) {
return nil, nil
}
func (m *mockedConn) Close() error {
return nil
}
func newPoint(t *testing.T, measurement string, tags map[string]string,
fields map[string]interface{}, timeString string) *client.Point {
t.Helper()
timestamp, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
t.Fatal(err)
}
p, err := client.NewPoint(measurement, tags, fields, timestamp)
if err != nil {
t.Fatal(err)
}
return p
}
func TestRunWithTestData(t *testing.T) {
// Verify tags and fields set by flags are set in records
flagTags.Set("tag=foo")
flagFields.Set("field=true")
defer func() {
flagTags = nil
flagFields = nil
}()
f, err := os.Open("testdata/output.txt")
if err != nil {
t.Fatal(err)
}
defer f.Close()
makeTags := func(pkg, resultType string) map[string]string {
return map[string]string{"package": pkg, "type": resultType, "tag": "foo"}
}
makeFields := func(pass, elapsed float64, test string) map[string]interface{} {
m := map[string]interface{}{"pass": pass, "elapsed": elapsed, "field": true}
if test != "" {
m["test"] = test
}
return m
}
expected := []*client.Point{
newPoint(t,
"result",
makeTags("pkg/passed", "test"),
makeFields(1, 0, "TestPass"),
"2018-03-08T10:33:12.344165231-08:00",
),
newPoint(t,
"result",
makeTags("pkg/passed", "package"),
makeFields(1, 0.013, ""),
"2018-03-08T10:33:12.34533033-08:00",
),
newPoint(t,
"result",
makeTags("pkg/panic", "test"),
makeFields(0, 600.029, "TestPanic"),
"2018-03-08T10:33:20.272440286-08:00",
),
newPoint(t,
"result",
makeTags("pkg/failed", "test"),
makeFields(0, 0.18, "TestFail"),
"2018-03-08T10:33:27.158860934-08:00",
),
newPoint(t,
"result",
makeTags("pkg/failed", "package"),
makeFields(0, 0.204, ""),
"2018-03-08T10:33:27.161302093-08:00",
),
newPoint(t,
"result",
makeTags("pkg/panic", "package"),
makeFields(0, 0, ""),
"2018-03-08T10:33:20.273440286-08:00",
),
}
var mc mockedConn
if err := run(&mc, f); err != nil {
t.Fatal(err)
}
if diff := test.Diff(expected, mc.bp.Points()); diff != "" {
t.Errorf("unexpected diff: %s", diff)
}
}
func TestTagsFlag(t *testing.T) {
for tc, expected := range map[string]tags{
"abc=def": tags{tag{key: "abc", value: "def"}},
"abc=def,ghi=klm": tags{tag{key: "abc", value: "def"}, tag{key: "ghi", value: "klm"}},
} {
t.Run(tc, func(t *testing.T) {
var ts tags
ts.Set(tc)
if diff := test.Diff(expected, ts); diff != "" {
t.Errorf("unexpected diff from Set: %s", diff)
}
if s := ts.String(); s != tc {
t.Errorf("unexpected diff from String: %q vs. %q", tc, s)
}
})
}
}
func TestFieldsFlag(t *testing.T) {
for tc, expected := range map[string]fields{
"str=abc": fields{field{key: "str", value: "abc"}},
"bool=true": fields{field{key: "bool", value: true}},
"bool=false": fields{field{key: "bool", value: false}},
"float64=42": fields{field{key: "float64", value: float64(42)}},
"float64=42.123": fields{field{key: "float64", value: float64(42.123)}},
"int64=42i": fields{field{key: "int64", value: int64(42)}},
"str=abc,bool=true,float64=42,int64=42i": fields{field{key: "str", value: "abc"},
field{key: "bool", value: true},
field{key: "float64", value: float64(42)},
field{key: "int64", value: int64(42)}},
} {
t.Run(tc, func(t *testing.T) {
var fs fields
fs.Set(tc)
if diff := test.Diff(expected, fs); diff != "" {
t.Errorf("unexpected diff from Set: %s", diff)
}
if s := fs.String(); s != tc {
t.Errorf("unexpected diff from String: %q vs. %q", tc, s)
}
})
}
}
func TestRunWithBenchmarkData(t *testing.T) {
// Verify tags and fields set by flags are set in records
flagTags.Set("tag=foo")
flagFields.Set("field=true")
defaultMeasurement := *flagMeasurement
*flagMeasurement = "benchmarks"
*flagBenchOnly = true
defer func() {
flagTags = nil
flagFields = nil
*flagMeasurement = defaultMeasurement
*flagBenchOnly = false
}()
f, err := os.Open("testdata/bench-output.txt")
if err != nil {
t.Fatal(err)
}
defer f.Close()
makeTags := func(pkg, benchmark string) map[string]string {
return map[string]string{
"package": pkg,
"benchmark": benchmark,
"tag": "foo",
}
}
makeFields := func(nsPerOp, mbPerS, bPerOp, allocsPerOp float64) map[string]interface{} {
m := map[string]interface{}{
"field": true,
}
if nsPerOp > 0 {
m[fieldNsPerOp] = nsPerOp
}
if mbPerS > 0 {
m[fieldMBPerS] = mbPerS
}
if bPerOp > 0 {
m[fieldAllocedBytesPerOp] = bPerOp
}
if allocsPerOp > 0 {
m[fieldAllocsPerOp] = allocsPerOp
}
return m
}
expected := []*client.Point{
newPoint(t,
"benchmarks",
makeTags("arista/pkg", "BenchmarkPassed-8"),
makeFields(127, 0, 16, 1),
"2018-11-08T15:53:12.935603594-08:00",
),
newPoint(t,
"benchmarks",
makeTags("arista/pkg/subpkg1", "BenchmarkLogged-8"),
makeFields(120, 0, 16, 1),
"2018-11-08T15:53:14.359792815-08:00",
),
newPoint(t,
"benchmarks",
makeTags("arista/pkg/subpkg2", "BenchmarkSetBytes-8"),
makeFields(120, 8.31, 16, 1),
"2018-11-08T15:53:15.717036333-08:00",
),
newPoint(t,
"benchmarks",
makeTags("arista/pkg/subpkg3", "BenchmarkWithSubs/sub_1-8"),
makeFields(118, 0, 16, 1),
"2018-11-08T15:53:17.952644273-08:00",
),
newPoint(t,
"benchmarks",
makeTags("arista/pkg/subpkg3", "BenchmarkWithSubs/sub_2-8"),
makeFields(117, 0, 16, 1),
"2018-11-08T15:53:20.443187742-08:00",
),
}
var mc mockedConn
err = run(&mc, f)
switch err.(type) {
case duplicateTestsErr:
default:
t.Fatal(err)
}
// parseBenchmarkOutput arranges the data in maps, so the generated points
// are in random order. Therefore, we diff maps instead of slices
pointsAsMap := func(points []*client.Point) map[string]*client.Point {
m := make(map[string]*client.Point, len(points))
for _, p := range points {
m[p.String()] = p
}
return m
}
expectedMap := pointsAsMap(expected)
actualMap := pointsAsMap(mc.bp.Points())
if diff := test.Diff(expectedMap, actualMap); diff != "" {
t.Errorf("unexpected diff: %s\nexpected: %v\nactual: %v", diff, expectedMap, actualMap)
}
}

View File

@ -1,78 +0,0 @@
{"Time":"2018-11-08T15:53:12.935037854-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"--- FAIL: BenchmarkFailed\n"}
{"Time":"2018-11-08T15:53:12.935531137-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:12.93555869-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:12.935580755-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"pkg: arista/pkg\n"}
{"Time":"2018-11-08T15:53:12.935603594-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"BenchmarkPassed-8 \t"}
{"Time":"2018-11-08T15:53:14.335388337-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:14.335478711-08:00","Action":"fail","Package":"arista/pkg","Test":"BenchmarkFailed"}
{"Time":"2018-11-08T15:53:14.335497629-08:00","Action":"output","Package":"arista/pkg","Output":"FAIL\n"}
{"Time":"2018-11-08T15:53:14.337028608-08:00","Action":"output","Package":"arista/pkg","Output":"exit status 1\n"}
{"Time":"2018-11-08T15:53:14.337084646-08:00","Action":"output","Package":"arista/pkg","Output":"FAIL\tarista/pkg\t2.044s\n"}
{"Time":"2018-11-08T15:53:14.33710102-08:00","Action":"fail","Package":"arista/pkg","Elapsed":2.044}
{"Time":"2018-11-08T15:53:14.359620241-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.359313 66704 logged_test.go:12] glog info\n"}
{"Time":"2018-11-08T15:53:14.359712288-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359462 66704 logged_test.go:13] glog error\n"}
{"Time":"2018-11-08T15:53:14.359732074-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:14.359745657-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:14.359762336-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"pkg: arista/pkg/subpkg1\n"}
{"Time":"2018-11-08T15:53:14.359792815-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"BenchmarkLogged-8 \tI1108 15:53:14.359720 66704 logged_test.go:12] glog info\n"}
{"Time":"2018-11-08T15:53:14.359815431-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359727 66704 logged_test.go:13] glog error\n"}
{"Time":"2018-11-08T15:53:14.360021406-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.359982 66704 logged_test.go:12] glog info\n"}
{"Time":"2018-11-08T15:53:14.360051256-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359999 66704 logged_test.go:13] glog error\n"}
{"Time":"2018-11-08T15:53:14.361552861-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.361525 66704 logged_test.go:12] glog info\n"}
{"Time":"2018-11-08T15:53:14.361590887-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.361533 66704 logged_test.go:13] glog error\n"}
{"Time":"2018-11-08T15:53:14.487176503-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.487111 66704 logged_test.go:12] glog info\n"}
{"Time":"2018-11-08T15:53:14.487247658-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.487134 66704 logged_test.go:13] glog error\n"}
{"Time":"2018-11-08T15:53:15.689641721-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"10000000\t 120 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:15.689748801-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"--- BENCH: BenchmarkLogged-8\n"}
{"Time":"2018-11-08T15:53:15.689816801-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
{"Time":"2018-11-08T15:53:15.689863374-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
{"Time":"2018-11-08T15:53:15.689912578-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
{"Time":"2018-11-08T15:53:15.689935701-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
{"Time":"2018-11-08T15:53:15.689956703-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
{"Time":"2018-11-08T15:53:15.6900942-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"BenchmarkPanicked-8 \t"}
{"Time":"2018-11-08T15:53:15.692466953-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"panic: panicking\n"}
{"Time":"2018-11-08T15:53:15.692555542-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\n"}
{"Time":"2018-11-08T15:53:15.692608705-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"goroutine 10 [running]:\n"}
{"Time":"2018-11-08T15:53:15.692641147-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"arista/pkg/subpkg1.BenchmarkPanicked(0xc4200ecf00)\n"}
{"Time":"2018-11-08T15:53:15.69266629-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/Users/rprada/go/src/arista/pkg/subpkg1/panicked_test.go:6 +0x39\n"}
{"Time":"2018-11-08T15:53:15.692697166-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"testing.(*B).runN(0xc4200ecf00, 0x1)\n"}
{"Time":"2018-11-08T15:53:15.69271775-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:141 +0xb2\n"}
{"Time":"2018-11-08T15:53:15.692738882-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"testing.(*B).run1.func1(0xc4200ecf00)\n"}
{"Time":"2018-11-08T15:53:15.692756307-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:214 +0x5a\n"}
{"Time":"2018-11-08T15:53:15.692773483-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"created by testing.(*B).run1\n"}
{"Time":"2018-11-08T15:53:15.69279028-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:207 +0x80\n"}
{"Time":"2018-11-08T15:53:15.694213193-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"exit status 2\n"}
{"Time":"2018-11-08T15:53:15.694269655-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"FAIL\tarista/pkg/subpkg1\t1.356s\n"}
{"Time":"2018-11-08T15:53:15.694289541-08:00","Action":"fail","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Elapsed":1.3559999999999999}
{"Time":"2018-11-08T15:53:15.716909476-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:15.71699479-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:15.717015107-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"pkg: arista/pkg/subpkg2\n"}
{"Time":"2018-11-08T15:53:15.717036333-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"BenchmarkSetBytes-8 \t"}
{"Time":"2018-11-08T15:53:17.044040116-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"10000000\t 120 ns/op\t 8.31 MB/s\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:17.044540811-08:00","Action":"output","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped","Output":"--- SKIP: BenchmarkSkipped\n"}
{"Time":"2018-11-08T15:53:17.044584422-08:00","Action":"output","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped","Output":"\tskipped_test.go:6: skipping\n"}
{"Time":"2018-11-08T15:53:17.044626843-08:00","Action":"skip","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped"}
{"Time":"2018-11-08T15:53:17.044642947-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"PASS\n"}
{"Time":"2018-11-08T15:53:17.04621203-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"ok \tarista/pkg/subpkg2\t1.350s\n"}
{"Time":"2018-11-08T15:53:17.046280101-08:00","Action":"pass","Package":"arista/pkg/subpkg2","Elapsed":1.351}
{"Time":"2018-11-08T15:53:17.952468407-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:17.952573911-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:17.952607727-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"pkg: arista/pkg/subpkg3\n"}
{"Time":"2018-11-08T15:53:17.952644273-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"BenchmarkWithSubs/sub_1-8 \t"}
{"Time":"2018-11-08T15:53:20.442803179-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"20000000\t 118 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:20.443187742-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"BenchmarkWithSubs/sub_2-8 \t"}
{"Time":"2018-11-08T15:53:21.743033457-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"10000000\t 117 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:21.743118494-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"PASS\n"}
{"Time":"2018-11-08T15:53:21.744485534-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"ok \tarista/pkg/subpkg3\t4.697s\n"}
{"Time":"2018-11-08T15:53:21.744547934-08:00","Action":"pass","Package":"arista/pkg/subpkg3","Elapsed":4.697}
{"Time":"2018-11-08T15:53:22.952573911-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:22.952607727-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:22.952644273-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"subpkg4: arista/subpkg4\n"}
{"Time":"2018-11-08T15:53:23.442803179-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"BenchmarkDuplicate-8 \t"}
{"Time":"2018-11-08T15:53:23.443187742-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:24.952573911-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goos: darwin\n"}
{"Time":"2018-11-08T15:53:24.952607727-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goarch: amd64\n"}
{"Time":"2018-11-08T15:53:24.952644273-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"subpkg4: arista/subpkg4\n"}
{"Time":"2018-11-08T15:53:25.442803179-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"BenchmarkDuplicate-8 \t"}
{"Time":"2018-11-08T15:53:25.443187742-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
{"Time":"2018-11-08T15:53:25.744547934-08:00","Action":"pass","Package":"arista/pkg/subpkg4","Elapsed":4.697}

View File

@ -1,15 +0,0 @@
{"Time":"2018-03-08T10:33:12.002692769-08:00","Action":"output","Package":"pkg/skipped","Output":"? \tpkg/skipped\t[no test files]\n"}
{"Time":"2018-03-08T10:33:12.003199228-08:00","Action":"skip","Package":"pkg/skipped","Elapsed":0.001}
{"Time":"2018-03-08T10:33:12.343866281-08:00","Action":"run","Package":"pkg/passed","Test":"TestPass"}
{"Time":"2018-03-08T10:33:12.34406622-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"=== RUN TestPass\n"}
{"Time":"2018-03-08T10:33:12.344139342-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"--- PASS: TestPass (0.00s)\n"}
{"Time":"2018-03-08T10:33:12.344165231-08:00","Action":"pass","Package":"pkg/passed","Test":"TestPass","Elapsed":0}
{"Time":"2018-03-08T10:33:12.344297059-08:00","Action":"output","Package":"pkg/passed","Output":"PASS\n"}
{"Time":"2018-03-08T10:33:12.345217622-08:00","Action":"output","Package":"pkg/passed","Output":"ok \tpkg/passed\t0.013s\n"}
{"Time":"2018-03-08T10:33:12.34533033-08:00","Action":"pass","Package":"pkg/passed","Elapsed":0.013}
{"Time":"2018-03-08T10:33:20.27231537-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"panic\n"}
{"Time":"2018-03-08T10:33:20.272414481-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"FAIL\tpkg/panic\t600.029s\n"}
{"Time":"2018-03-08T10:33:20.272440286-08:00","Action":"fail","Package":"pkg/panic","Test":"TestPanic","Elapsed":600.029}
{"Time":"2018-03-08T10:33:27.158776469-08:00","Action":"output","Package":"pkg/failed","Test":"TestFail","Output":"--- FAIL: TestFail (0.18s)\n"}
{"Time":"2018-03-08T10:33:27.158860934-08:00","Action":"fail","Package":"pkg/failed","Test":"TestFail","Elapsed":0.18}
{"Time":"2018-03-08T10:33:27.161302093-08:00","Action":"fail","Package":"pkg/failed","Elapsed":0.204}

View File

@ -1,66 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// Package dscp provides helper functions to apply DSCP / ECN / CoS flags to sockets.
package dscp

import (
	"net"
	"syscall"
	"time"
)

// DialTCPWithTOS is similar to net.DialTCP but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func DialTCPWithTOS(laddr, raddr *net.TCPAddr, tos byte) (*net.TCPConn, error) {
	d := net.Dialer{
		LocalAddr: laddr,
		Control: func(network, address string, c syscall.RawConn) error {
			return setTOS(network, c, tos)
		},
	}
	conn, err := d.Dial("tcp", raddr.String())
	if err != nil {
		return nil, err
	}
	return conn.(*net.TCPConn), err
}

// DialTimeoutWithTOS is similar to net.DialTimeout but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func DialTimeoutWithTOS(network, address string, timeout time.Duration, tos byte) (net.Conn,
	error) {
	d := net.Dialer{
		Timeout: timeout,
		Control: func(network, address string, c syscall.RawConn) error {
			return setTOS(network, c, tos)
		},
	}
	conn, err := d.Dial(network, address)
	if err != nil {
		return nil, err
	}
	return conn, err
}

// DialTCPTimeoutWithTOS is the same as DialTimeoutWithTOS except that it enforces
// "tcp" and provides an option to specify the local (source) address.
func DialTCPTimeoutWithTOS(laddr, raddr *net.TCPAddr, tos byte, timeout time.Duration) (net.Conn,
	error) {
	d := net.Dialer{
		Timeout:   timeout,
		LocalAddr: laddr,
		Control: func(network, address string, c syscall.RawConn) error {
			return setTOS(network, c, tos)
		},
	}
	conn, err := d.Dial("tcp", raddr.String())
	if err != nil {
		return nil, err
	}
	return conn, err
}
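For callers of the dialers above, a short usage sketch — the address and ToS value are illustrative, not taken from this repository:

package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	// Illustrative address only (TEST-NET-1); any reachable TCP endpoint works.
	raddr, err := net.ResolveTCPAddr("tcp", "192.0.2.10:6030")
	if err != nil {
		log.Fatal(err)
	}
	// 0xB8 puts DSCP EF (46) in the upper six bits of the ToS byte.
	conn, err := dscp.DialTCPWithTOS(nil, raddr, 0xB8)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}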

View File

@ -1,103 +0,0 @@
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package dscp_test

import (
	"fmt"
	"net"
	"strings"
	"testing"
	"time"

	"github.com/aristanetworks/goarista/dscp"
)

func TestDialTCPWithTOS(t *testing.T) {
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}
	listen, err := net.ListenTCP("tcp", addr)
	if err != nil {
		t.Fatal(err)
	}
	defer listen.Close()
	done := make(chan struct{})
	go func() {
		// close(done) is deferred so the main goroutine cannot block
		// forever if this goroutine bails out early.
		defer close(done)
		conn, err := listen.Accept()
		if err != nil {
			// t.Fatal must not be called outside the test goroutine.
			t.Error(err)
			return
		}
		defer conn.Close()
		buf := []byte{'!'}
		conn.Write(buf)
		n, err := conn.Read(buf)
		if n != 1 || err != nil {
			t.Errorf("Read returned %d / %s", n, err)
		} else if buf[0] != '!' {
			t.Errorf("Expected to read '!' but got %q", buf)
		}
	}()
	conn, err := dscp.DialTCPWithTOS(nil, listen.Addr().(*net.TCPAddr), 40)
	if err != nil {
		t.Fatal("Connection failed:", err)
	}
	defer conn.Close()
	buf := make([]byte, 1)
	n, err := conn.Read(buf)
	if n != 1 || err != nil {
		t.Fatalf("Read returned %d / %s", n, err)
	} else if buf[0] != '!' {
		t.Fatalf("Expected to read '!' but got %q", buf)
	}
	conn.Write(buf)
	<-done
}

func TestDialTCPTimeoutWithTOS(t *testing.T) {
	raddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}
	for name, td := range map[string]*net.TCPAddr{
		"ipNoPort": {
			IP: net.ParseIP("127.0.0.42"), Port: 0,
		},
		"ipWithPort": {
			IP: net.ParseIP("127.0.0.42"), Port: 10001,
		},
	} {
		t.Run(name, func(t *testing.T) {
			l, err := net.ListenTCP("tcp", raddr)
			if err != nil {
				t.Fatal(err)
			}
			defer l.Close()
			var srcAddr net.Addr
			done := make(chan struct{})
			go func() {
				defer close(done)
				conn, err := l.Accept()
				if err != nil {
					// t.Fatal must not be called outside the test goroutine.
					t.Error(err)
					return
				}
				defer conn.Close()
				srcAddr = conn.RemoteAddr()
			}()
			conn, err := dscp.DialTCPTimeoutWithTOS(td, l.Addr().(*net.TCPAddr), 40, 5*time.Second)
			if err != nil {
				t.Fatal("Connection failed:", err)
			}
			defer conn.Close()
			pfx := td.IP.String() + ":"
			if td.Port > 0 {
				pfx = fmt.Sprintf("%s%d", pfx, td.Port)
			}
			<-done
			if !strings.HasPrefix(srcAddr.String(), pfx) {
				t.Fatalf("DialTCPTimeoutWithTOS wrong address: %q instead of %q", srcAddr, pfx)
			}
		})
	}
}
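Note that these tests verify connectivity and the source address but never read the ToS byte back. A sketch of how that could be checked on the dialed connection — readTOS is a hypothetical helper, IPv4-only, using golang.org/x/sys/unix:

import (
	"net"

	"golang.org/x/sys/unix"
)

// readTOS reads IP_TOS back from a connected IPv4 TCP socket.
// Hypothetical helper for illustration only.
func readTOS(conn *net.TCPConn) (int, error) {
	raw, err := conn.SyscallConn()
	if err != nil {
		return 0, err
	}
	var tos int
	var serr error
	if err := raw.Control(func(fd uintptr) {
		tos, serr = unix.GetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_TOS)
	}); err != nil {
		return 0, err
	}
	return tos, serr
}

With tos 40 as in the tests above, readTOS(conn) would be expected to return 40, subject to platform behavior.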

View File

@ -1,57 +0,0 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package dscp

import (
	"context"
	"net"
	"os"
	"strings"
	"syscall"

	"github.com/aristanetworks/glog"
	"golang.org/x/sys/unix"
)

// ListenTCPWithTOS is similar to net.ListenTCP but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func ListenTCPWithTOS(address *net.TCPAddr, tos byte) (*net.TCPListener, error) {
	cfg := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			return setTOS(network, c, tos)
		},
	}
	lsnr, err := cfg.Listen(context.Background(), "tcp", address.String())
	if err != nil {
		return nil, err
	}
	return lsnr.(*net.TCPListener), err
}

func setTOS(network string, c syscall.RawConn, tos byte) error {
	return c.Control(func(fd uintptr) {
		// Configure the IPv4 TOS for both IPv4 and IPv6 networks because
		// v4 connections can still come in over v6 networks.
		err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_TOS, int(tos))
		if err != nil {
			glog.Errorf("failed to configure IP_TOS: %v", os.NewSyscallError("setsockopt", err))
		}
		if strings.HasSuffix(network, "4") {
			// Skip configuring IPv6 when we know we are on an IPv4
			// network, to avoid a spurious error.
			return
		}
		err6 := unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_TCLASS, int(tos))
		if err6 != nil {
			glog.Errorf(
				"failed to configure IPV6_TCLASS, traffic may not use the configured DSCP: %v",
				os.NewSyscallError("setsockopt", err6))
		}
	})
}
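setTOS takes the raw ToS byte, so callers thinking in DSCP terms must shift the code point into the upper six bits themselves (the lower two are ECN). A sketch with an illustrative code point:

package main

import (
	"log"
	"net"

	"github.com/aristanetworks/goarista/dscp"
)

func main() {
	// DSCP occupies bits 7..2 of the ToS byte; ECN uses the bottom two.
	const dscpAF31 = 26        // illustrative code point
	tos := byte(dscpAF31 << 2) // 26<<2 = 104 = 0x68

	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}
	l, err := dscp.ListenTCPWithTOS(addr, tos)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}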

Some files were not shown because too many files have changed in this diff