forked from cerc-io/ipld-eth-server
commit
f2547b5a42
@ -1,7 +1,7 @@
|
||||
dist: trusty
|
||||
language: go
|
||||
go:
|
||||
- 1.11
|
||||
- 1.12
|
||||
services:
|
||||
- postgresql
|
||||
addons:
|
||||
@ -17,8 +17,8 @@ before_script:
|
||||
- sudo -u postgres createdb vulcanize_private
|
||||
- make migrate NAME=vulcanize_private
|
||||
script:
|
||||
- make test
|
||||
- make integrationtest
|
||||
- env GO111MODULE=on make test
|
||||
- env GO111MODULE=on make integrationtest
|
||||
notifications:
|
||||
email: false
|
||||
env:
|
||||
|
580
Gopkg.lock
generated
580
Gopkg.lock
generated
@ -1,580 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:48a213e9dc4880bbbd6999309a476fa4d3cc67560aa7127154cf8ea95bd464c2"
|
||||
name = "github.com/allegro/bigcache"
|
||||
packages = [
|
||||
".",
|
||||
"queue",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a313376bcbcce8ae8bddb8089a7293e0473a0f8e9e3710d6244e09e81875ccf0"
|
||||
name = "github.com/aristanetworks/goarista"
|
||||
packages = ["monotime"]
|
||||
pruneopts = ""
|
||||
revision = "ff33da284e760fcdb03c33d37a719e5ed30ba844"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c6bf1ac7bbc0fe51637bf54d5a88ff79b171b3b42dbc665dec98303c862d8662"
|
||||
name = "github.com/btcsuite/btcd"
|
||||
packages = ["btcec"]
|
||||
pruneopts = ""
|
||||
revision = "cff30e1d23fc9e800b2b5b4b41ef1817dda07e9f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5d47691333460db6ac83ced03c79b4bdb9aff3e322be24affb7855bed8affc6c"
|
||||
name = "github.com/dave/jennifer"
|
||||
packages = ["jen"]
|
||||
pruneopts = ""
|
||||
revision = "14e399b6b5e8456c66c45c955fc27b568bacb5c9"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:aaeffbff5bd24654cb4c190ed75d6c7b57b4f5d6741914c1a7a6bb7447e756c5"
|
||||
name = "github.com/deckarep/golang-set"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e"
|
||||
version = "v1.7.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:90d36f5b581e95e00ced808cd48824ed6c320c25887828cce461bdef4cb7bc7c"
|
||||
name = "github.com/ethereum/go-ethereum"
|
||||
packages = [
|
||||
".",
|
||||
"accounts",
|
||||
"accounts/abi",
|
||||
"accounts/abi/bind",
|
||||
"accounts/keystore",
|
||||
"common",
|
||||
"common/bitutil",
|
||||
"common/hexutil",
|
||||
"common/math",
|
||||
"common/mclock",
|
||||
"common/prque",
|
||||
"core/rawdb",
|
||||
"core/types",
|
||||
"crypto",
|
||||
"crypto/ecies",
|
||||
"crypto/secp256k1",
|
||||
"ethclient",
|
||||
"ethdb",
|
||||
"ethdb/leveldb",
|
||||
"ethdb/memorydb",
|
||||
"event",
|
||||
"log",
|
||||
"metrics",
|
||||
"p2p",
|
||||
"p2p/discover",
|
||||
"p2p/discv5",
|
||||
"p2p/enode",
|
||||
"p2p/enr",
|
||||
"p2p/nat",
|
||||
"p2p/netutil",
|
||||
"params",
|
||||
"rlp",
|
||||
"rpc",
|
||||
"trie",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "cd79bc61a983d6482579d12cdd239b37bbfa12ef"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
|
||||
name = "github.com/fsnotify/fsnotify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a01080d20c45c031c13f3828c56e58f4f51d926a482ad10cc0316225097eb7ea"
|
||||
name = "github.com/go-stack/stack"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = ""
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5247b135b5492aa232a731acdcb52b08f32b874cb398f21ab460396eadbe866b"
|
||||
name = "github.com/google/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [
|
||||
".",
|
||||
"simplelru",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d14365c51dd1d34d5c79833ec91413bfbb166be978724f15701e17080dc06dec"
|
||||
name = "github.com/hashicorp/hcl"
|
||||
packages = [
|
||||
".",
|
||||
"hcl/ast",
|
||||
"hcl/parser",
|
||||
"hcl/printer",
|
||||
"hcl/scanner",
|
||||
"hcl/strconv",
|
||||
"hcl/token",
|
||||
"json/parser",
|
||||
"json/scanner",
|
||||
"json/token",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b3c5b95e56c06f5aa72cb2500e6ee5f44fcd122872d4fec2023a488e561218bc"
|
||||
name = "github.com/hpcloud/tail"
|
||||
packages = [
|
||||
".",
|
||||
"ratelimiter",
|
||||
"util",
|
||||
"watch",
|
||||
"winfile",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b6e4cc26365c004808649862e22069de09594a9222143399a7a04904e9f7018c"
|
||||
name = "github.com/huin/goupnp"
|
||||
packages = [
|
||||
".",
|
||||
"dcps/internetgateway1",
|
||||
"dcps/internetgateway2",
|
||||
"httpu",
|
||||
"scpd",
|
||||
"soap",
|
||||
"ssdp",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1395d1447324cbea88d249fbfcfd70ea878fdfca"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:76f836364ae83ed811c415aa92e1209ce49de9f62aad85b85fca749a8b96a110"
|
||||
name = "github.com/jackpal/go-nat-pmp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c9cfead9f2a36ddf3daa40ba269aa7f4bbba6b62"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:617ee2434b77e911fa26b678730be9a617f75243b194eadc8201c8ac860844aa"
|
||||
name = "github.com/jmoiron/sqlx"
|
||||
packages = [
|
||||
".",
|
||||
"reflectx",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0dae4fefe7c0e190f7b5a78dac28a1c82cc8d849"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6a874e3ddfb9db2b42bd8c85b6875407c702fa868eed20634ff489bc896ccfd3"
|
||||
name = "github.com/konsorten/go-windows-terminal-sequences"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:29145d7af4adafd72a79df5e41456ac9e232d5a28c1cd4dacf3ff008a217fc10"
|
||||
name = "github.com/lib/pq"
|
||||
packages = [
|
||||
".",
|
||||
"oid",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4ded0e9383f75c197b3a2aaa6d590ac52df6fd79"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:961dc3b1d11f969370533390fdf203813162980c858e1dabe827b60940c909a5"
|
||||
name = "github.com/magiconair/properties"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2353362d570a7bfa228149c62842019201cfb71"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5219b4506253ccc598f9340677162a42d6a78f340a4cc6df2d62db4d0593c4e9"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a7fd918fb5bd2188436785c0424f8a50b4addfedf37a2b14d796be2a927b8007"
|
||||
name = "github.com/onsi/ginkgo"
|
||||
packages = [
|
||||
".",
|
||||
"config",
|
||||
"internal/codelocation",
|
||||
"internal/containernode",
|
||||
"internal/failer",
|
||||
"internal/leafnodes",
|
||||
"internal/remote",
|
||||
"internal/spec",
|
||||
"internal/spec_iterator",
|
||||
"internal/specrunner",
|
||||
"internal/suite",
|
||||
"internal/testingtproxy",
|
||||
"internal/writer",
|
||||
"reporters",
|
||||
"reporters/stenographer",
|
||||
"reporters/stenographer/support/go-colorable",
|
||||
"reporters/stenographer/support/go-isatty",
|
||||
"types",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "3774a09d95489ccaa16032e0770d08ea77ba6184"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3ecd0a37c4a90c12a97e31c398cdbc173824351aa891898ee178120bfe71c478"
|
||||
name = "github.com/onsi/gomega"
|
||||
packages = [
|
||||
".",
|
||||
"format",
|
||||
"ghttp",
|
||||
"internal/assertion",
|
||||
"internal/asyncassertion",
|
||||
"internal/oraclematcher",
|
||||
"internal/testingtsupport",
|
||||
"matchers",
|
||||
"matchers/support/goraph/bipartitegraph",
|
||||
"matchers/support/goraph/edge",
|
||||
"matchers/support/goraph/node",
|
||||
"matchers/support/goraph/util",
|
||||
"types",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369"
|
||||
version = "v1.4.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a5484d4fa43127138ae6e7b2299a6a52ae006c7f803d98d717f60abf3e97192e"
|
||||
name = "github.com/pborman/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
|
||||
version = "v1.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:894aef961c056b6d85d12bac890bf60c44e99b46292888bfa66caf529f804457"
|
||||
name = "github.com/pelletier/go-toml"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
|
||||
version = "v0.8.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fdbe7e05d74cc4d175cc4515a7807a5bb8b66ebe130da382b99713c9038648ae"
|
||||
name = "github.com/pressly/goose"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "e4b98955473e91a12fc7d8816c28d06376d1d92c"
|
||||
version = "v2.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7143292549152d009ca9e9c493b74736a2ebd93f921bea8a4b308d7cc5edc6b3"
|
||||
name = "github.com/rjeczalik/notify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0f065fa99b48b842c3fd3e2c8b194c6f2b69f6b8"
|
||||
version = "v0.9.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:78c9cf43ddeacd0e472f412082227a0fac2ae107ee60e9112156f9371f9912cf"
|
||||
name = "github.com/rs/cors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3fb1b69b103a84de38a19c3c6ec073dd6caa4d3f"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9d57e200ef5ccc4217fe0a34287308bac652435e7c6513f6263e0493d2245c56"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d0431c2fd72e39ee43ea7742322abbc200c3e704c9102c5c3c2e2e667095b0ca"
|
||||
name = "github.com/spf13/afero"
|
||||
packages = [
|
||||
".",
|
||||
"mem",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
|
||||
version = "v1.1.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d0b38ba6da419a6d4380700218eeec8623841d44a856bb57369c172fbf692ab4"
|
||||
name = "github.com/spf13/cast"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "8965335b8c7107321228e3e3702cab9832751bac"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a1403cc8a94b8d7956ee5e9694badef0e7b051af289caad1cf668331e3ffa4f6"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9ceffa4ab5f7195ecf18b3a7fff90c837a9ed5e22e66d18069e4bccfe1f52aa0"
|
||||
name = "github.com/spf13/jwalterweatherman"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0a52bcb568386d98f4894575d53ce3e456f56471de6897bb8b9de13c33d9340e"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac25ea6cc1156aca9611411274b4a0bdd83a623845df6985aab508253955cc66"
|
||||
name = "github.com/spf13/viper"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "8fb642006536c8d3760c99d4fa2389f5e2205631"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ce5194e5afac308cc34e500cab45b4ce88a0742d689e3cf7e37b607ad76bed2f"
|
||||
name = "github.com/syndtr/goleveldb"
|
||||
packages = [
|
||||
"leveldb",
|
||||
"leveldb/cache",
|
||||
"leveldb/comparer",
|
||||
"leveldb/errors",
|
||||
"leveldb/filter",
|
||||
"leveldb/iterator",
|
||||
"leveldb/journal",
|
||||
"leveldb/memdb",
|
||||
"leveldb/opt",
|
||||
"leveldb/storage",
|
||||
"leveldb/table",
|
||||
"leveldb/util",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ae2bd5eed72d46b28834ec3f60db3a3ebedd8dbd"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:59b49c47c11a48f1054529207f65907c014ecf5f9a7c0d9c0f1616dec7b062ed"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"pbkdf2",
|
||||
"scrypt",
|
||||
"sha3",
|
||||
"ssh/terminal",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fbdbb6cf8db3278412c9425ad78b26bb8eb788181f26a3ffb3e4f216b314f86a"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"html",
|
||||
"html/atom",
|
||||
"html/charset",
|
||||
"websocket",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:70d519d5cddeb60ceda2db88c24c340b1b2d7efb25ab54bacb38f57ea1998df7"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d641721ec2dead6fe5ca284096fe4b1fcd49e427"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"encoding",
|
||||
"encoding/charmap",
|
||||
"encoding/htmlindex",
|
||||
"encoding/internal",
|
||||
"encoding/internal/identifier",
|
||||
"encoding/japanese",
|
||||
"encoding/korean",
|
||||
"encoding/simplifiedchinese",
|
||||
"encoding/traditionalchinese",
|
||||
"encoding/unicode",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"internal/utf8internal",
|
||||
"language",
|
||||
"runes",
|
||||
"transform",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
source = "gopkg.in/fsnotify/fsnotify.v1"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:4f830ee018eb8c56d0def653ad7c9a1d2a053f0cef2ac6b2200f73b98fa6a681"
|
||||
name = "gopkg.in/natefinch/npipe.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6"
|
||||
|
||||
[[projects]]
|
||||
branch = "v1"
|
||||
digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd"
|
||||
name = "gopkg.in/tomb.v1"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/dave/jennifer/jen",
|
||||
"github.com/ethereum/go-ethereum",
|
||||
"github.com/ethereum/go-ethereum/accounts/abi",
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind",
|
||||
"github.com/ethereum/go-ethereum/common",
|
||||
"github.com/ethereum/go-ethereum/common/hexutil",
|
||||
"github.com/ethereum/go-ethereum/core/rawdb",
|
||||
"github.com/ethereum/go-ethereum/core/types",
|
||||
"github.com/ethereum/go-ethereum/crypto",
|
||||
"github.com/ethereum/go-ethereum/ethclient",
|
||||
"github.com/ethereum/go-ethereum/ethdb",
|
||||
"github.com/ethereum/go-ethereum/p2p",
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5",
|
||||
"github.com/ethereum/go-ethereum/rlp",
|
||||
"github.com/ethereum/go-ethereum/rpc",
|
||||
"github.com/hashicorp/golang-lru",
|
||||
"github.com/hpcloud/tail",
|
||||
"github.com/jmoiron/sqlx",
|
||||
"github.com/lib/pq",
|
||||
"github.com/mitchellh/go-homedir",
|
||||
"github.com/onsi/ginkgo",
|
||||
"github.com/onsi/gomega",
|
||||
"github.com/onsi/gomega/ghttp",
|
||||
"github.com/pressly/goose",
|
||||
"github.com/sirupsen/logrus",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/spf13/viper",
|
||||
"golang.org/x/net/context",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"gopkg.in/tomb.v1",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
54
Gopkg.toml
54
Gopkg.toml
@ -1,54 +0,0 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[override]]
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
source = "gopkg.in/fsnotify/fsnotify.v1"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/pressly/sup"
|
||||
version = "0.5.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/onsi/ginkgo"
|
||||
version = "1.4.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/jmoiron/sqlx"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/lib/pq"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/cobra"
|
||||
version = "0.0.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/ethereum/go-ethereum"
|
||||
revision = "cd79bc61a983d6482579d12cdd239b37bbfa12ef"
|
13
Makefile
13
Makefile
@ -3,11 +3,6 @@ BASE = $(GOPATH)/src/$(PACKAGE)
|
||||
PKGS = go list ./... | grep -v "^vendor/"
|
||||
|
||||
# Tools
|
||||
## Dependency management
|
||||
DEP = $(BIN)/dep
|
||||
$(BIN)/dep:
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
|
||||
## Testing library
|
||||
GINKGO = $(BIN)/ginkgo
|
||||
$(BIN)/ginkgo:
|
||||
@ -32,7 +27,7 @@ $(BIN)/gometalinter.v2:
|
||||
|
||||
|
||||
.PHONY: installtools
|
||||
installtools: | $(LINT) $(GOOSE) $(GINKGO) $(DEP)
|
||||
installtools: | $(LINT) $(GOOSE) $(GINKGO)
|
||||
echo "Installing tools"
|
||||
|
||||
.PHONY: metalint
|
||||
@ -58,11 +53,7 @@ integrationtest: | $(GINKGO) $(LINT)
|
||||
go fmt ./...
|
||||
$(GINKGO) -r integration_test/
|
||||
|
||||
.PHONY: dep
|
||||
dep: | $(DEP)
|
||||
$(DEP) ensure
|
||||
|
||||
build: dep
|
||||
build:
|
||||
go fmt ./...
|
||||
go build
|
||||
|
||||
|
11
README.md
11
README.md
@ -35,7 +35,7 @@ data from VulcanizeDB's underlying Postgres database and making it accessible.
|
||||
1. [Configuring a synced Ethereum node](#configuring-a-synced-ethereum-node)
|
||||
|
||||
### Dependencies
|
||||
- Go 1.11+
|
||||
- Go 1.12+
|
||||
- Postgres 11.2
|
||||
- Ethereum Node
|
||||
- [Go Ethereum](https://ethereum.github.io/go-ethereum/downloads/) (1.8.23+)
|
||||
@ -46,16 +46,17 @@ Download the codebase to your local `GOPATH` via:
|
||||
|
||||
`go get github.com/vulcanize/vulcanizedb`
|
||||
|
||||
Move to the project directory and use [golang/dep](https://github.com/golang/dep) to install the dependencies:
|
||||
Move to the project directory:
|
||||
|
||||
`cd $GOPATH/src/github.com/vulcanize/vulcanizedb`
|
||||
|
||||
`dep ensure`
|
||||
|
||||
Once the dependencies have been successfully installed, build the executable with:
|
||||
Be sure you have enabled Go Modules (`export GO111MODULE=on`), and build the executable with:
|
||||
|
||||
`make build`
|
||||
|
||||
If you need to use a different dependency than what is currently defined in `go.mod`, it may helpful to look into [the replace directive](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive).
|
||||
This instruction enables you to point at a fork or the local filesystem for dependency resolution.
|
||||
|
||||
If you are running into issues at this stage, ensure that `GOPATH` is defined in your shell.
|
||||
If necessary, `GOPATH` can be set in `~/.bashrc` or `~/.bash_profile`, depending upon your system.
|
||||
It can be additionally helpful to add `$GOPATH/bin` to your shell's `$PATH`.
|
||||
|
@ -39,7 +39,7 @@ or [contract](../../staging/libraries/shared/watcher/contract_watcher.go#L68) wa
|
||||
* If the base vDB migrations occupy this path as well, they need to be in their `goose fix`ed form
|
||||
as they are [here](../../staging/db/migrations)
|
||||
|
||||
To update a plugin repository with changes to the core vulcanizedb repository, run `dep ensure` to update its dependencies.
|
||||
To update a plugin repository with changes to the core vulcanizedb repository, use your dependency manager to install the desired version of vDB.
|
||||
|
||||
## Building and Running Custom Transformers
|
||||
### Commands
|
||||
|
65
go.mod
Normal file
65
go.mod
Normal file
@ -0,0 +1,65 @@
|
||||
module github.com/vulcanize/vulcanizedb
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/allegro/bigcache v1.1.0 // indirect
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible // indirect
|
||||
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76 // indirect
|
||||
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc // indirect
|
||||
github.com/cespare/cp v1.1.1 // indirect
|
||||
github.com/dave/jennifer v1.3.0
|
||||
github.com/deckarep/golang-set v1.7.1 // indirect
|
||||
github.com/docker/docker v1.13.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/elastic/gosigar v0.10.4 // indirect
|
||||
github.com/ethereum/go-ethereum v1.9.0
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/go-sql-driver/mysql v1.4.1 // indirect
|
||||
github.com/golang/protobuf v1.3.2 // indirect
|
||||
github.com/google/uuid v1.0.0 // indirect
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598 // indirect
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47
|
||||
github.com/howeyc/fsnotify v0.9.0 // indirect
|
||||
github.com/hpcloud/tail v1.0.0
|
||||
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/influxdata/influxdb v1.7.7 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.1 // indirect
|
||||
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 // indirect
|
||||
github.com/lib/pq v1.0.0
|
||||
github.com/mattn/go-colorable v0.1.2 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.11.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.0.0
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.1 // indirect
|
||||
github.com/onsi/ginkgo v1.6.0
|
||||
github.com/onsi/gomega v1.4.2
|
||||
github.com/oschwald/maxminddb-golang v1.3.1 // indirect
|
||||
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 // indirect
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/pressly/goose v2.6.0+incompatible
|
||||
github.com/prometheus/tsdb v0.9.1 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.5.0 // indirect
|
||||
github.com/sirupsen/logrus v1.2.0
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/viper v1.2.0
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f // indirect
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 // indirect
|
||||
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.0.0 // indirect
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 // indirect
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
google.golang.org/appengine v1.6.1 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
)
|
226
go.sum
Normal file
226
go.sum
Normal file
@ -0,0 +1,226 @@
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.1.0 h1:MLuIKTjdxDc+qsG2rhjsYjsHQC5LUGjIWzutg7M+W68=
|
||||
github.com/allegro/bigcache v1.1.0/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible h1:z1u2gv0/rsSi/HqMDB436AiUROXXim7st5DOg4Ikl4A=
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU=
|
||||
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76 h1:64W/KrGykPTfDI9xTkZtnjZRYA5p2+c/IuGgjzeWCpI=
|
||||
github.com/aristanetworks/goarista v0.0.0-20180907105523-ff33da284e76/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc h1:nLRj+ULuRYb0qTAOnuayFXRnLjYXBots5CSp5zH4RqU=
|
||||
github.com/btcsuite/btcd v0.0.0-20180903232927-cff30e1d23fc/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
|
||||
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
|
||||
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/dave/jennifer v1.3.0 h1:p3tl41zjjCZTNBytMwrUuiAnherNUZktlhPTKoF/sEk=
|
||||
github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
|
||||
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
|
||||
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/gosigar v0.10.4 h1:6jfw75dsoflhBMRdO6QPzQUgLqUYTsQQQRkkcsHsuPo=
|
||||
github.com/elastic/gosigar v0.10.4/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
|
||||
github.com/ethereum/go-ethereum v1.9.0 h1:9Kaf7UfDkV3aIUJlf14hI/GgEgRAUq60u4fBlb9dLWw=
|
||||
github.com/ethereum/go-ethereum v1.9.0/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598 h1:XLoCW/kXxbvPvp216Kq/c+TtwWYHy9sjeDidFcG45g0=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20190610161739-8f92f34fc598/go.mod h1:Au3iQ8DvDis8hZ4q2OzRcaKYlAsPt+fYvib5q4nIqu4=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/howeyc/fsnotify v0.9.0 h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=
|
||||
github.com/howeyc/fsnotify v0.9.0/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324 h1:PV190X5/DzQ/tbFFG5YpT5mH6q+cHlfgqI5JuRnH9oE=
|
||||
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb v1.7.7 h1:UvNzAPfBrKMENVbQ4mr4ccA9sW+W1Ihl0Yh1s0BiVAg=
|
||||
github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0 h1:5B0uxl2lzNRVkJVg+uGHxWtRt4C0Wjc6kJKo5XYx8xE=
|
||||
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 h1:S8kWZLXHpcOq3nGAvIs0oDgd4CXxkxE3hkDVRjTu7ro=
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.0.0 h1:vVpGvMXJPqSDh2VYHF7gsfQj8Ncx+Xw5Y1KHeTRY+7I=
|
||||
github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
|
||||
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/oschwald/maxminddb-golang v1.3.1 h1:kPc5+ieL5CC/Zn0IaXJPxDFlUxKTQEU8QBTtmfQDAIo=
|
||||
github.com/oschwald/maxminddb-golang v1.3.1/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
|
||||
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 h1:zNBQb37RGLmJybyMcs983HfUfpkw9OTFD9tbBfAViHE=
|
||||
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pressly/goose v2.6.0+incompatible h1:3f8zIQ8rfgP9tyI0Hmcs2YNAqUCL1c+diLe3iU8Qd/k=
|
||||
github.com/pressly/goose v2.6.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.9.1 h1:IWaAmWkYlgG7/S4iw4IpAQt5Y35QaZM6/GsZ7GsjAuk=
|
||||
github.com/prometheus/tsdb v0.9.1/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rs/cors v1.5.0 h1:dgSHE6+ia18arGOTIYQKKGWLvEbGvmbNE6NfxhoNHUY=
|
||||
github.com/rs/cors v1.5.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg=
|
||||
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
|
||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.2.0 h1:M4Rzxlu+RgU4pyBRKhKaVN1VeYOm8h2jgyXnAseDgCc=
|
||||
github.com/spf13/viper v1.2.0/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f h1:T7YHzO3/eqD/kv5m9+TLM4XuEAkN7NPj5pnZHqaOo/Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 h1:o6NMd68tuqfQ0ZFnz2d16xzFNLWxrCvqF40InOJJHSM=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/tyler-smith/go-bip39 v1.0.0 h1:FOHg9gaQLeBBRbHE/QrTLfEiBHy5pQ/yXzf9JG5pYFM=
|
||||
github.com/tyler-smith/go-bip39 v1.0.0/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c h1:+EXw7AwNOKzPFXMZ1yNjO40aWCh3PIquJB2fYlv9wcs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff h1:uuol9OUzSvZntY1v963NAbVd7A+PHLMz1FlCe3Lorcs=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
@ -220,7 +220,7 @@ var _ = Describe("Parser", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
selectMethods := p.GetMethods([]string{})
|
||||
Expect(len(selectMethods)).To(Equal(22))
|
||||
Expect(len(selectMethods)).To(Equal(25))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -29,7 +29,7 @@ type Block struct {
|
||||
Number int64 `db:"number"`
|
||||
ParentHash string `db:"parent_hash"`
|
||||
Size string `db:"size"`
|
||||
Time int64 `db:"time"`
|
||||
Time uint64 `db:"time"`
|
||||
Transactions []TransactionModel
|
||||
UncleHash string `db:"uncle_hash"`
|
||||
UnclesReward string `db:"uncles_reward"`
|
||||
|
@ -42,7 +42,7 @@ type POAHeader struct {
|
||||
Number *hexutil.Big `json:"number" gencodec:"required"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
|
||||
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
|
||||
Time *hexutil.Big `json:"timestamp" gencodec:"required"`
|
||||
Time hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
||||
Extra hexutil.Bytes `json:"extraData" gencodec:"required"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
type Reader interface {
|
||||
@ -48,7 +49,7 @@ func (ldbr *LevelDatabaseReader) GetBlockNumber(hash common.Hash) *uint64 {
|
||||
}
|
||||
|
||||
func (ldbr *LevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
|
||||
return rawdb.ReadReceipts(ldbr.reader, hash, number)
|
||||
return rawdb.ReadReceipts(ldbr.reader, hash, number, ¶ms.ChainConfig{})
|
||||
}
|
||||
|
||||
func (ldbr *LevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
|
||||
|
@ -80,7 +80,7 @@ var _ = Describe("Saving blocks", func() {
|
||||
blockNonce := "0x881db2ca900682e9a9"
|
||||
miner := "x123"
|
||||
extraData := "xextraData"
|
||||
blockTime := int64(1508981640)
|
||||
blockTime := uint64(1508981640)
|
||||
uncleHash := "x789"
|
||||
blockSize := string("1000")
|
||||
difficulty := int64(10)
|
||||
@ -98,7 +98,7 @@ var _ = Describe("Saving blocks", func() {
|
||||
Number: blockNumber,
|
||||
ParentHash: blockParentHash,
|
||||
Size: blockSize,
|
||||
Time: blockTime,
|
||||
Time: uint64(blockTime),
|
||||
UncleHash: uncleHash,
|
||||
UnclesReward: unclesReward,
|
||||
}
|
||||
|
@ -162,7 +162,7 @@ func (blockChain *BlockChain) getPOAHeader(blockNumber int64) (header core.Heade
|
||||
Number: POAHeader.Number.ToInt(),
|
||||
GasLimit: uint64(POAHeader.GasLimit),
|
||||
GasUsed: uint64(POAHeader.GasUsed),
|
||||
Time: POAHeader.Time.ToInt(),
|
||||
Time: uint64(POAHeader.Time),
|
||||
Extra: POAHeader.Extra,
|
||||
}, POAHeader.Hash.String()), nil
|
||||
}
|
||||
@ -211,7 +211,7 @@ func (blockChain *BlockChain) getPOAHeaders(blockNumbers []int64) (headers []cor
|
||||
Number: POAHeader.Number.ToInt(),
|
||||
GasLimit: uint64(POAHeader.GasLimit),
|
||||
GasUsed: uint64(POAHeader.GasUsed),
|
||||
Time: POAHeader.Time.ToInt(),
|
||||
Time: uint64(POAHeader.Time),
|
||||
Extra: POAHeader.Extra,
|
||||
}, POAHeader.Hash.String())
|
||||
|
||||
|
@ -19,6 +19,7 @@ package common
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
@ -52,7 +53,7 @@ func (bc BlockConverter) ToCoreBlock(gethBlock *types.Block) (core.Block, error)
|
||||
Number: gethBlock.Number().Int64(),
|
||||
ParentHash: gethBlock.ParentHash().Hex(),
|
||||
Size: gethBlock.Size().String(),
|
||||
Time: gethBlock.Time().Int64(),
|
||||
Time: gethBlock.Time(),
|
||||
Transactions: transactions,
|
||||
UncleHash: gethBlock.UncleHash().Hex(),
|
||||
}
|
||||
@ -81,7 +82,7 @@ func (bc BlockConverter) ToCoreUncle(block core.Block, uncles []*types.Header) (
|
||||
Hash: uncle.Hash().Hex(),
|
||||
Raw: raw,
|
||||
Reward: thisUncleReward.String(),
|
||||
Timestamp: uncle.Time.String(),
|
||||
Timestamp: strconv.FormatUint(uncle.Time, 10),
|
||||
}
|
||||
coreUncles = append(coreUncles, coreUncle)
|
||||
totalUncleRewards.Add(totalUncleRewards, thisUncleReward)
|
||||
|
@ -44,7 +44,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
extraData, _ := hexutil.Decode("0xe4b883e5bda9e7a59ee4bb99e9b1bc")
|
||||
nonce := types.BlockNonce{10}
|
||||
number := int64(1)
|
||||
time := int64(140000000)
|
||||
time := uint64(140000000)
|
||||
|
||||
header := types.Header{
|
||||
Difficulty: difficulty,
|
||||
@ -55,7 +55,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
|
||||
Nonce: nonce,
|
||||
Number: big.NewInt(number),
|
||||
ParentHash: common.Hash{64},
|
||||
Time: big.NewInt(time),
|
||||
Time: time,
|
||||
UncleHash: common.Hash{128},
|
||||
}
|
||||
block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
|
||||
|
@ -18,6 +18,7 @@ package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
@ -34,7 +35,7 @@ func (converter HeaderConverter) Convert(gethHeader *types.Header, blockHash str
|
||||
Hash: blockHash,
|
||||
BlockNumber: gethHeader.Number.Int64(),
|
||||
Raw: rawHeader,
|
||||
Timestamp: gethHeader.Time.String(),
|
||||
Timestamp: strconv.FormatUint(gethHeader.Time, 10),
|
||||
}
|
||||
return coreHeader
|
||||
}
|
||||
|
@ -19,6 +19,7 @@ package common_test
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -37,7 +38,7 @@ var _ = Describe("Block header converter", func() {
|
||||
ParentHash: common.HexToHash("0xParent"),
|
||||
ReceiptHash: common.HexToHash("0xReceipt"),
|
||||
Root: common.HexToHash("0xRoot"),
|
||||
Time: big.NewInt(123456789),
|
||||
Time: uint64(123456789),
|
||||
TxHash: common.HexToHash("0xTransaction"),
|
||||
UncleHash: common.HexToHash("0xUncle"),
|
||||
}
|
||||
@ -48,7 +49,7 @@ var _ = Describe("Block header converter", func() {
|
||||
|
||||
Expect(coreHeader.BlockNumber).To(Equal(gethHeader.Number.Int64()))
|
||||
Expect(coreHeader.Hash).To(Equal(hash))
|
||||
Expect(coreHeader.Timestamp).To(Equal(gethHeader.Time.String()))
|
||||
Expect(coreHeader.Timestamp).To(Equal(strconv.FormatUint(gethHeader.Time, 10)))
|
||||
})
|
||||
|
||||
It("includes raw bytes for header as JSON", func() {
|
||||
|
5
vendor/github.com/allegro/bigcache/.gitignore
generated
vendored
5
vendor/github.com/allegro/bigcache/.gitignore
generated
vendored
@ -1,5 +0,0 @@
|
||||
.idea
|
||||
.DS_Store
|
||||
/server/server.exe
|
||||
/server/server
|
||||
CHANGELOG.md
|
31
vendor/github.com/allegro/bigcache/.travis.yml
generated
vendored
31
vendor/github.com/allegro/bigcache/.travis.yml
generated
vendored
@ -1,31 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
before_install:
|
||||
- go get github.com/modocache/gover
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get golang.org/x/tools/cmd/goimports
|
||||
- go get github.com/golang/lint/golint
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get github.com/gordonklaus/ineffassign
|
||||
|
||||
script:
|
||||
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
|
||||
- diff <(echo -n) <(gofmt -s -d .)
|
||||
- golint ./... # This won't break the build, just show warnings
|
||||
- ineffassign .
|
||||
- go vet ./...
|
||||
- go test -race -count=1 -coverprofile=queue.coverprofile ./queue
|
||||
- go test -race -count=1 -coverprofile=server.coverprofile ./server
|
||||
- go test -race -count=1 -coverprofile=main.coverprofile
|
||||
- $HOME/gopath/bin/gover
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=gover.coverprofile -service travis-ci
|
201
vendor/github.com/allegro/bigcache/LICENSE
generated
vendored
201
vendor/github.com/allegro/bigcache/LICENSE
generated
vendored
@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
145
vendor/github.com/allegro/bigcache/README.md
generated
vendored
145
vendor/github.com/allegro/bigcache/README.md
generated
vendored
@ -1,145 +0,0 @@
|
||||
# BigCache [](https://travis-ci.org/allegro/bigcache) [](https://coveralls.io/github/allegro/bigcache?branch=master) [](https://godoc.org/github.com/allegro/bigcache) [](https://goreportcard.com/report/github.com/allegro/bigcache)
|
||||
|
||||
Fast, concurrent, evicting in-memory cache written to keep big number of entries without impact on performance.
|
||||
BigCache keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place,
|
||||
therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||
|
||||
## Usage
|
||||
|
||||
### Simple initialization
|
||||
|
||||
```go
|
||||
import "github.com/allegro/bigcache"
|
||||
|
||||
cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
|
||||
|
||||
cache.Set("my-unique-key", []byte("value"))
|
||||
|
||||
entry, _ := cache.Get("my-unique-key")
|
||||
fmt.Println(string(entry))
|
||||
```
|
||||
|
||||
### Custom initialization
|
||||
|
||||
When cache load can be predicted in advance then it is better to use custom initialization because additional memory
|
||||
allocation can be avoided in that way.
|
||||
|
||||
```go
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
)
|
||||
|
||||
config := bigcache.Config {
|
||||
// number of shards (must be a power of 2)
|
||||
Shards: 1024,
|
||||
// time after which entry can be evicted
|
||||
LifeWindow: 10 * time.Minute,
|
||||
// rps * lifeWindow, used only in initial memory allocation
|
||||
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||
// max entry size in bytes, used only in initial memory allocation
|
||||
MaxEntrySize: 500,
|
||||
// prints information about additional memory allocation
|
||||
Verbose: true,
|
||||
// cache will not allocate more memory than this limit, value in MB
|
||||
// if value is reached then the oldest entries can be overridden for the new ones
|
||||
// 0 value means no size limit
|
||||
HardMaxCacheSize: 8192,
|
||||
// callback fired when the oldest entry is removed because of its
|
||||
// expiration time or no space left for the new entry. Default value is nil which
|
||||
// means no callback and it prevents from unwrapping the oldest entry.
|
||||
OnRemove: nil,
|
||||
}
|
||||
|
||||
cache, initErr := bigcache.NewBigCache(config)
|
||||
if initErr != nil {
|
||||
log.Fatal(initErr)
|
||||
}
|
||||
|
||||
cache.Set("my-unique-key", []byte("value"))
|
||||
|
||||
if entry, err := cache.Get("my-unique-key"); err == nil {
|
||||
fmt.Println(string(entry))
|
||||
}
|
||||
```
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
|
||||
Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.
|
||||
|
||||
### Writes and reads
|
||||
|
||||
```bash
|
||||
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
|
||||
|
||||
BenchmarkMapSet-8 2000000 716 ns/op 336 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSet-8 1000000 1292 ns/op 347 B/op 8 allocs/op
|
||||
BenchmarkFreeCacheSet-8 3000000 501 ns/op 371 B/op 3 allocs/op
|
||||
BenchmarkBigCacheSet-8 3000000 482 ns/op 303 B/op 2 allocs/op
|
||||
BenchmarkMapGet-8 5000000 309 ns/op 24 B/op 1 allocs/op
|
||||
BenchmarkConcurrentMapGet-8 2000000 659 ns/op 24 B/op 2 allocs/op
|
||||
BenchmarkFreeCacheGet-8 3000000 541 ns/op 152 B/op 3 allocs/op
|
||||
BenchmarkBigCacheGet-8 3000000 420 ns/op 152 B/op 3 allocs/op
|
||||
BenchmarkBigCacheSetParallel-8 10000000 184 ns/op 313 B/op 3 allocs/op
|
||||
BenchmarkFreeCacheSetParallel-8 10000000 195 ns/op 357 B/op 4 allocs/op
|
||||
BenchmarkConcurrentMapSetParallel-8 5000000 242 ns/op 200 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGetParallel-8 20000000 100 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkFreeCacheGetParallel-8 10000000 133 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkConcurrentMapGetParallel-8 10000000 202 ns/op 24 B/op 2 allocs/op
|
||||
```
|
||||
|
||||
Writes and reads in bigcache are faster than in freecache.
|
||||
Writes to map are the slowest.
|
||||
|
||||
### GC pause time
|
||||
|
||||
```bash
|
||||
cd caches_bench; go run caches_gc_overhead_comparison.go
|
||||
|
||||
Number of entries: 20000000
|
||||
GC pause for bigcache: 5.8658ms
|
||||
GC pause for freecache: 32.4341ms
|
||||
GC pause for map: 52.9661ms
|
||||
```
|
||||
|
||||
Test shows how long are the GC pauses for caches filled with 20mln of entries.
|
||||
Bigcache and freecache have very similar GC pause time.
|
||||
It is clear that both reduce GC overhead in contrast to map
|
||||
which GC pause time took more than 10 seconds.
|
||||
|
||||
## How it works
|
||||
|
||||
BigCache relies on optimization presented in 1.5 version of Go ([issue-9477](https://github.com/golang/go/issues/9477)).
|
||||
This optimization states that if map without pointers in keys and values is used then GC will omit its content.
|
||||
Therefore BigCache uses `map[uint64]uint32` where keys are hashed and values are offsets of entries.
|
||||
|
||||
Entries are kept in bytes array, to omit GC again.
|
||||
Bytes array size can grow to gigabytes without impact on performance
|
||||
because GC will only see single pointer to it.
|
||||
|
||||
## Bigcache vs Freecache
|
||||
|
||||
Both caches provide the same core features but they reduce GC overhead in different ways.
|
||||
Bigcache relies on `map[uint64]uint32`, freecache implements its own mapping built on
|
||||
slices to reduce number of pointers.
|
||||
|
||||
Results from benchmark tests are presented above.
|
||||
One of the advantage of bigcache over freecache is that you don’t need to know
|
||||
the size of the cache in advance, because when bigcache is full,
|
||||
it can allocate additional memory for new entries instead of
|
||||
overwriting existing ones as freecache does currently.
|
||||
However hard max size in bigcache also can be set, check [HardMaxCacheSize](https://godoc.org/github.com/allegro/bigcache#Config).
|
||||
|
||||
## HTTP Server
|
||||
|
||||
This package also includes an easily deployable HTTP implementation of BigCache, which can be found in the [server](/server) package.
|
||||
|
||||
## More
|
||||
|
||||
Bigcache genesis is described in allegro.tech blog post: [writing a very fast cache service in Go](http://allegro.tech/2016/03/writing-fast-cache-service-in-go.html)
|
||||
|
||||
## License
|
||||
|
||||
BigCache is released under the Apache 2.0 license (see [LICENSE](LICENSE))
|
155
vendor/github.com/allegro/bigcache/bigcache.go
generated
vendored
155
vendor/github.com/allegro/bigcache/bigcache.go
generated
vendored
@ -1,155 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
minimumEntriesInShard = 10 // Minimum number of entries in single shard
|
||||
)
|
||||
|
||||
// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
|
||||
// It keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place,
|
||||
// therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||
type BigCache struct {
|
||||
shards []*cacheShard
|
||||
lifeWindow uint64
|
||||
clock clock
|
||||
hash Hasher
|
||||
config Config
|
||||
shardMask uint64
|
||||
maxShardSize uint32
|
||||
}
|
||||
|
||||
// NewBigCache initialize new instance of BigCache
|
||||
func NewBigCache(config Config) (*BigCache, error) {
|
||||
return newBigCache(config, &systemClock{})
|
||||
}
|
||||
|
||||
func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||
|
||||
if !isPowerOfTwo(config.Shards) {
|
||||
return nil, fmt.Errorf("Shards number must be power of two")
|
||||
}
|
||||
|
||||
if config.Hasher == nil {
|
||||
config.Hasher = newDefaultHasher()
|
||||
}
|
||||
|
||||
cache := &BigCache{
|
||||
shards: make([]*cacheShard, config.Shards),
|
||||
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||
clock: clock,
|
||||
hash: config.Hasher,
|
||||
config: config,
|
||||
shardMask: uint64(config.Shards - 1),
|
||||
maxShardSize: uint32(config.maximumShardSize()),
|
||||
}
|
||||
|
||||
var onRemove func(wrappedEntry []byte)
|
||||
if config.OnRemove == nil {
|
||||
onRemove = cache.notProvidedOnRemove
|
||||
} else {
|
||||
onRemove = cache.providedOnRemove
|
||||
}
|
||||
|
||||
for i := 0; i < config.Shards; i++ {
|
||||
cache.shards[i] = initNewShard(config, onRemove, clock)
|
||||
}
|
||||
|
||||
if config.CleanWindow > 0 {
|
||||
go func() {
|
||||
for t := range time.Tick(config.CleanWindow) {
|
||||
cache.cleanUp(uint64(t.Unix()))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
// Get reads entry for the key.
|
||||
// It returns an EntryNotFoundError when
|
||||
// no entry exists for the given key.
|
||||
func (c *BigCache) Get(key string) ([]byte, error) {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.get(key, hashedKey)
|
||||
}
|
||||
|
||||
// Set saves entry under the key
|
||||
func (c *BigCache) Set(key string, entry []byte) error {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.set(key, hashedKey, entry)
|
||||
}
|
||||
|
||||
// Delete removes the key
|
||||
func (c *BigCache) Delete(key string) error {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
shard := c.getShard(hashedKey)
|
||||
return shard.del(key, hashedKey)
|
||||
}
|
||||
|
||||
// Reset empties all cache shards
|
||||
func (c *BigCache) Reset() error {
|
||||
for _, shard := range c.shards {
|
||||
shard.reset(c.config)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len computes number of entries in cache
|
||||
func (c *BigCache) Len() int {
|
||||
var len int
|
||||
for _, shard := range c.shards {
|
||||
len += shard.len()
|
||||
}
|
||||
return len
|
||||
}
|
||||
|
||||
// Stats returns cache's statistics
|
||||
func (c *BigCache) Stats() Stats {
|
||||
var s Stats
|
||||
for _, shard := range c.shards {
|
||||
tmp := shard.getStats()
|
||||
s.Hits += tmp.Hits
|
||||
s.Misses += tmp.Misses
|
||||
s.DelHits += tmp.DelHits
|
||||
s.DelMisses += tmp.DelMisses
|
||||
s.Collisions += tmp.Collisions
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
|
||||
func (c *BigCache) Iterator() *EntryInfoIterator {
|
||||
return newIterator(c)
|
||||
}
|
||||
|
||||
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > c.lifeWindow {
|
||||
evict()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *BigCache) cleanUp(currentTimestamp uint64) {
|
||||
for _, shard := range c.shards {
|
||||
shard.cleanUp(currentTimestamp)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
|
||||
return c.shards[hashedKey&c.shardMask]
|
||||
}
|
||||
|
||||
func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
|
||||
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
|
||||
}
|
||||
|
||||
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
|
||||
}
|
141
vendor/github.com/allegro/bigcache/bigcache_bench_test.go
generated
vendored
141
vendor/github.com/allegro/bigcache/bigcache_bench_test.go
generated
vendored
@ -1,141 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var message = blob('a', 256)
|
||||
|
||||
func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
|
||||
writeToCache(b, 1, 100*time.Second, b.N)
|
||||
}
|
||||
|
||||
func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
|
||||
m := blob('a', 1024)
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 100 * time.Second,
|
||||
MaxEntriesInWindow: 100,
|
||||
MaxEntrySize: 256,
|
||||
HardMaxCacheSize: 1,
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(fmt.Sprintf("key-%d", i), m)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteToUnlimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
|
||||
m := blob('a', 1024)
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 100 * time.Second,
|
||||
MaxEntriesInWindow: 100,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(fmt.Sprintf("key-%d", i), m)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteToCache(b *testing.B) {
|
||||
for _, shards := range []int{1, 512, 1024, 8192} {
|
||||
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
|
||||
writeToCache(b, shards, 100*time.Second, b.N)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFromCache(b *testing.B) {
|
||||
for _, shards := range []int{1, 512, 1024, 8192} {
|
||||
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
|
||||
readFromCache(b, 1024)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIterateOverCache(b *testing.B) {
|
||||
|
||||
m := blob('a', 1)
|
||||
|
||||
for _, shards := range []int{512, 1024, 8192} {
|
||||
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: shards,
|
||||
LifeWindow: 1000 * time.Second,
|
||||
MaxEntriesInWindow: max(b.N, 100),
|
||||
MaxEntrySize: 500,
|
||||
})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(fmt.Sprintf("key-%d", i), m)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
it := cache.Iterator()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
b.ReportAllocs()
|
||||
|
||||
for pb.Next() {
|
||||
if it.SetNext() {
|
||||
it.Value()
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
|
||||
writeToCache(b, 1024, 100*time.Second, 100)
|
||||
}
|
||||
|
||||
func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: shards,
|
||||
LifeWindow: lifeWindow,
|
||||
MaxEntriesInWindow: max(requestsInLifeWindow, 100),
|
||||
MaxEntrySize: 500,
|
||||
})
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := rand.Int()
|
||||
counter := 0
|
||||
|
||||
b.ReportAllocs()
|
||||
for pb.Next() {
|
||||
cache.Set(fmt.Sprintf("key-%d-%d", id, counter), message)
|
||||
counter = counter + 1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func readFromCache(b *testing.B, shards int) {
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: shards,
|
||||
LifeWindow: 1000 * time.Second,
|
||||
MaxEntriesInWindow: max(b.N, 100),
|
||||
MaxEntrySize: 500,
|
||||
})
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(strconv.Itoa(i), message)
|
||||
}
|
||||
b.ResetTimer()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
b.ReportAllocs()
|
||||
|
||||
for pb.Next() {
|
||||
cache.Get(strconv.Itoa(rand.Intn(b.N)))
|
||||
}
|
||||
})
|
||||
}
|
579
vendor/github.com/allegro/bigcache/bigcache_test.go
generated
vendored
579
vendor/github.com/allegro/bigcache/bigcache_test.go
generated
vendored
@ -1,579 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var sink []byte
|
||||
|
||||
func TestParallel(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
|
||||
value := []byte("value")
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
keys := 1337
|
||||
|
||||
// when
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), value)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < keys; i++ {
|
||||
sink, _ = cache.Get(fmt.Sprintf("key%d", i))
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Delete(fmt.Sprintf("key%d", i))
|
||||
}
|
||||
}()
|
||||
|
||||
// then
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestWriteAndGetOnCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(DefaultConfig(5 * time.Second))
|
||||
value := []byte("value")
|
||||
|
||||
// when
|
||||
cache.Set("key", value)
|
||||
cachedValue, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, value, cachedValue)
|
||||
}
|
||||
|
||||
func TestConstructCacheWithDefaultHasher(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 16,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 10,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
assert.IsType(t, fnv64a{}, cache.hash)
|
||||
}
|
||||
|
||||
func TestWillReturnErrorOnInvalidNumberOfPartitions(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, error := NewBigCache(Config{
|
||||
Shards: 18,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 10,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
assert.Nil(t, cache)
|
||||
assert.Error(t, error, "Shards number must be power of two")
|
||||
}
|
||||
|
||||
func TestEntryNotFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 16,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 10,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
// when
|
||||
_, err := cache.Get("nonExistingKey")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"nonExistingKey\" not found")
|
||||
}
|
||||
|
||||
func TestTimingEviction(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value2"))
|
||||
_, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"key\" not found")
|
||||
}
|
||||
|
||||
func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 4,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value 2"))
|
||||
value, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err, "Entry \"key\" not found")
|
||||
assert.Equal(t, []byte("value"), value)
|
||||
}
|
||||
|
||||
func TestCleanShouldEvictAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 4,
|
||||
LifeWindow: time.Second,
|
||||
CleanWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
<-time.After(3 * time.Second)
|
||||
value, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"key\" not found")
|
||||
assert.Equal(t, value, []byte(nil))
|
||||
}
|
||||
|
||||
func TestOnRemoveCallback(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
onRemoveInvoked := false
|
||||
onRemove := func(key string, entry []byte) {
|
||||
onRemoveInvoked = true
|
||||
assert.Equal(t, "key", key)
|
||||
assert.Equal(t, []byte("value"), entry)
|
||||
}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
OnRemove: onRemove,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value2"))
|
||||
|
||||
// then
|
||||
assert.True(t, onRemoveInvoked)
|
||||
}
|
||||
|
||||
func TestCacheLen(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 8,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
keys := 1337
|
||||
|
||||
// when
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
|
||||
// then
|
||||
assert.Equal(t, keys, cache.Len())
|
||||
}
|
||||
|
||||
func TestCacheStats(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 8,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
|
||||
// when
|
||||
for i := 0; i < 100; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
value, err := cache.Get(fmt.Sprintf("key%d", i))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, string(value), "value")
|
||||
}
|
||||
for i := 100; i < 110; i++ {
|
||||
_, err := cache.Get(fmt.Sprintf("key%d", i))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
for i := 10; i < 20; i++ {
|
||||
err := cache.Delete(fmt.Sprintf("key%d", i))
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
for i := 110; i < 120; i++ {
|
||||
err := cache.Delete(fmt.Sprintf("key%d", i))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// then
|
||||
stats := cache.Stats()
|
||||
assert.Equal(t, stats.Hits, int64(10))
|
||||
assert.Equal(t, stats.Misses, int64(10))
|
||||
assert.Equal(t, stats.DelHits, int64(10))
|
||||
assert.Equal(t, stats.DelMisses, int64(10))
|
||||
}
|
||||
|
||||
func TestCacheDel(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(DefaultConfig(time.Second))
|
||||
|
||||
// when
|
||||
err := cache.Delete("nonExistingKey")
|
||||
|
||||
// then
|
||||
assert.Equal(t, err.Error(), "Entry \"nonExistingKey\" not found")
|
||||
|
||||
// and when
|
||||
cache.Set("existingKey", nil)
|
||||
err = cache.Delete("existingKey")
|
||||
cachedValue, _ := cache.Get("existingKey")
|
||||
|
||||
// then
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, cachedValue, 0)
|
||||
}
|
||||
|
||||
func TestCacheReset(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 8,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
keys := 1337
|
||||
|
||||
// when
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
|
||||
// then
|
||||
assert.Equal(t, keys, cache.Len())
|
||||
|
||||
// and when
|
||||
cache.Reset()
|
||||
|
||||
// then
|
||||
assert.Equal(t, 0, cache.Len())
|
||||
|
||||
// and when
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
|
||||
// then
|
||||
assert.Equal(t, keys, cache.Len())
|
||||
}
|
||||
|
||||
func TestIterateOnResetCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 8,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
keys := 1337
|
||||
|
||||
// when
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
cache.Reset()
|
||||
|
||||
// then
|
||||
iterator := cache.Iterator()
|
||||
|
||||
assert.Equal(t, false, iterator.SetNext())
|
||||
}
|
||||
|
||||
func TestGetOnResetCache(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 8,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
})
|
||||
keys := 1337
|
||||
|
||||
// when
|
||||
for i := 0; i < keys; i++ {
|
||||
cache.Set(fmt.Sprintf("key%d", i), []byte("value"))
|
||||
}
|
||||
|
||||
cache.Reset()
|
||||
|
||||
// then
|
||||
value, err := cache.Get("key1")
|
||||
|
||||
assert.Equal(t, err.Error(), "Entry \"key1\" not found")
|
||||
assert.Equal(t, value, []byte(nil))
|
||||
}
|
||||
|
||||
func TestEntryUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 6 * time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key", []byte("value2"))
|
||||
clock.set(7)
|
||||
cache.Set("key2", []byte("value3"))
|
||||
cachedValue, _ := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.Equal(t, []byte("value2"), cachedValue)
|
||||
}
|
||||
|
||||
func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 1,
|
||||
HardMaxCacheSize: 1,
|
||||
})
|
||||
|
||||
// when
|
||||
cache.Set("key1", blob('a', 1024*400))
|
||||
cache.Set("key2", blob('b', 1024*400))
|
||||
cache.Set("key3", blob('c', 1024*800))
|
||||
|
||||
_, key1Err := cache.Get("key1")
|
||||
_, key2Err := cache.Get("key2")
|
||||
entry3, _ := cache.Get("key3")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, key1Err, "Entry \"key1\" not found")
|
||||
assert.EqualError(t, key2Err, "Entry \"key2\" not found")
|
||||
assert.Equal(t, blob('c', 1024*800), entry3)
|
||||
}
|
||||
|
||||
func TestRetrievingEntryShouldCopy(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 1,
|
||||
HardMaxCacheSize: 1,
|
||||
})
|
||||
cache.Set("key1", blob('a', 1024*400))
|
||||
value, key1Err := cache.Get("key1")
|
||||
|
||||
// when
|
||||
// override queue
|
||||
cache.Set("key2", blob('b', 1024*400))
|
||||
cache.Set("key3", blob('c', 1024*400))
|
||||
cache.Set("key4", blob('d', 1024*400))
|
||||
cache.Set("key5", blob('d', 1024*400))
|
||||
|
||||
// then
|
||||
assert.Nil(t, key1Err)
|
||||
assert.Equal(t, blob('a', 1024*400), value)
|
||||
}
|
||||
|
||||
func TestEntryBiggerThanMaxShardSizeError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 1,
|
||||
HardMaxCacheSize: 1,
|
||||
})
|
||||
|
||||
// when
|
||||
err := cache.Set("key1", blob('a', 1024*1025))
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "entry is bigger than max shard size")
|
||||
}
|
||||
|
||||
func TestHashCollision(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ml := &mockedLogger{}
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 16,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 10,
|
||||
MaxEntrySize: 256,
|
||||
Verbose: true,
|
||||
Hasher: hashStub(5),
|
||||
Logger: ml,
|
||||
})
|
||||
|
||||
// when
|
||||
cache.Set("liquid", []byte("value"))
|
||||
cachedValue, err := cache.Get("liquid")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("value"), cachedValue)
|
||||
|
||||
// when
|
||||
cache.Set("costarring", []byte("value 2"))
|
||||
cachedValue, err = cache.Get("costarring")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("value 2"), cachedValue)
|
||||
|
||||
// when
|
||||
cachedValue, err = cache.Get("liquid")
|
||||
|
||||
// then
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, cachedValue)
|
||||
|
||||
assert.NotEqual(t, "", ml.lastFormat)
|
||||
assert.Equal(t, cache.Stats().Collisions, int64(1))
|
||||
}
|
||||
|
||||
func TestNilValueCaching(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: 5 * time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 1,
|
||||
HardMaxCacheSize: 1,
|
||||
})
|
||||
|
||||
// when
|
||||
cache.Set("Kierkegaard", []byte{})
|
||||
cachedValue, err := cache.Get("Kierkegaard")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, cachedValue)
|
||||
|
||||
// when
|
||||
cache.Set("Sartre", nil)
|
||||
cachedValue, err = cache.Get("Sartre")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, cachedValue)
|
||||
|
||||
// when
|
||||
cache.Set("Nietzsche", []byte(nil))
|
||||
cachedValue, err = cache.Get("Nietzsche")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte{}, cachedValue)
|
||||
}
|
||||
|
||||
type mockedLogger struct {
|
||||
lastFormat string
|
||||
lastArgs []interface{}
|
||||
}
|
||||
|
||||
func (ml *mockedLogger) Printf(format string, v ...interface{}) {
|
||||
ml.lastFormat = format
|
||||
ml.lastArgs = v
|
||||
}
|
||||
|
||||
type mockedClock struct {
|
||||
value int64
|
||||
}
|
||||
|
||||
func (mc *mockedClock) epoch() int64 {
|
||||
return mc.value
|
||||
}
|
||||
|
||||
func (mc *mockedClock) set(value int64) {
|
||||
mc.value = value
|
||||
}
|
||||
|
||||
func blob(char byte, len int) []byte {
|
||||
b := make([]byte, len)
|
||||
for index := range b {
|
||||
b[index] = char
|
||||
}
|
||||
return b
|
||||
}
|
219
vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go
generated
vendored
219
vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go
generated
vendored
@ -1,219 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
"github.com/coocood/freecache"
|
||||
)
|
||||
|
||||
const maxEntrySize = 256
|
||||
|
||||
func BenchmarkMapSet(b *testing.B) {
|
||||
m := make(map[string][]byte)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m[key(i)] = value()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkConcurrentMapSet(b *testing.B) {
|
||||
var m sync.Map
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Store(key(i), value())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFreeCacheSet(b *testing.B) {
|
||||
cache := freecache.NewCache(b.N * maxEntrySize)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set([]byte(key(i)), value(), 0)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBigCacheSet(b *testing.B) {
|
||||
cache := initBigCache(b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(key(i), value())
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMapGet(b *testing.B) {
|
||||
b.StopTimer()
|
||||
m := make(map[string][]byte)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m[key(i)] = value()
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
hitCount := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
if m[key(i)] != nil {
|
||||
hitCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkConcurrentMapGet(b *testing.B) {
|
||||
b.StopTimer()
|
||||
var m sync.Map
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Store(key(i), value())
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
hitCounter := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, ok := m.Load(key(i))
|
||||
if ok {
|
||||
hitCounter++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFreeCacheGet(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cache := freecache.NewCache(b.N * maxEntrySize)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set([]byte(key(i)), value(), 0)
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Get([]byte(key(i)))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBigCacheGet(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cache := initBigCache(b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(key(i), value())
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Get(key(i))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkBigCacheSetParallel(b *testing.B) {
|
||||
cache := initBigCache(b.N)
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := rand.Intn(1000)
|
||||
counter := 0
|
||||
for pb.Next() {
|
||||
cache.Set(parallelKey(id, counter), value())
|
||||
counter = counter + 1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkFreeCacheSetParallel(b *testing.B) {
|
||||
cache := freecache.NewCache(b.N * maxEntrySize)
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := rand.Intn(1000)
|
||||
counter := 0
|
||||
for pb.Next() {
|
||||
cache.Set([]byte(parallelKey(id, counter)), value(), 0)
|
||||
counter = counter + 1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkConcurrentMapSetParallel(b *testing.B) {
|
||||
var m sync.Map
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := rand.Intn(1000)
|
||||
for pb.Next() {
|
||||
m.Store(key(id), value())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkBigCacheGetParallel(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cache := initBigCache(b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set(key(i), value())
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
counter := 0
|
||||
for pb.Next() {
|
||||
cache.Get(key(counter))
|
||||
counter = counter + 1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkFreeCacheGetParallel(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cache := freecache.NewCache(b.N * maxEntrySize)
|
||||
for i := 0; i < b.N; i++ {
|
||||
cache.Set([]byte(key(i)), value(), 0)
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
counter := 0
|
||||
for pb.Next() {
|
||||
cache.Get([]byte(key(counter)))
|
||||
counter = counter + 1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkConcurrentMapGetParallel(b *testing.B) {
|
||||
b.StopTimer()
|
||||
var m sync.Map
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Store(key(i), value())
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
hitCount := 0
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
id := rand.Intn(1000)
|
||||
for pb.Next() {
|
||||
_, ok := m.Load(key(id))
|
||||
if ok {
|
||||
hitCount++
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func key(i int) string {
|
||||
return fmt.Sprintf("key-%010d", i)
|
||||
}
|
||||
|
||||
func value() []byte {
|
||||
return make([]byte, 100)
|
||||
}
|
||||
|
||||
func parallelKey(threadID int, counter int) string {
|
||||
return fmt.Sprintf("key-%04d-%06d", threadID, counter)
|
||||
}
|
||||
|
||||
func initBigCache(entriesInWindow int) *bigcache.BigCache {
|
||||
cache, _ := bigcache.NewBigCache(bigcache.Config{
|
||||
Shards: 256,
|
||||
LifeWindow: 10 * time.Minute,
|
||||
MaxEntriesInWindow: entriesInWindow,
|
||||
MaxEntrySize: maxEntrySize,
|
||||
Verbose: true,
|
||||
})
|
||||
|
||||
return cache
|
||||
}
|
96
vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go
generated
vendored
96
vendor/github.com/allegro/bigcache/caches_bench/caches_gc_overhead_comparison.go
generated
vendored
@ -1,96 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
"github.com/coocood/freecache"
|
||||
)
|
||||
|
||||
func gcPause() time.Duration {
|
||||
runtime.GC()
|
||||
var stats debug.GCStats
|
||||
debug.ReadGCStats(&stats)
|
||||
return stats.PauseTotal
|
||||
}
|
||||
|
||||
const (
|
||||
entries = 20000000
|
||||
valueSize = 100
|
||||
)
|
||||
|
||||
func main() {
|
||||
debug.SetGCPercent(10)
|
||||
fmt.Println("Number of entries: ", entries)
|
||||
|
||||
config := bigcache.Config{
|
||||
Shards: 256,
|
||||
LifeWindow: 100 * time.Minute,
|
||||
MaxEntriesInWindow: entries,
|
||||
MaxEntrySize: 200,
|
||||
Verbose: true,
|
||||
}
|
||||
|
||||
bigcache, _ := bigcache.NewBigCache(config)
|
||||
for i := 0; i < entries; i++ {
|
||||
key, val := generateKeyValue(i, valueSize)
|
||||
bigcache.Set(key, val)
|
||||
}
|
||||
|
||||
firstKey, _ := generateKeyValue(1, valueSize)
|
||||
checkFirstElement(bigcache.Get(firstKey))
|
||||
|
||||
fmt.Println("GC pause for bigcache: ", gcPause())
|
||||
bigcache = nil
|
||||
gcPause()
|
||||
|
||||
//------------------------------------------
|
||||
|
||||
freeCache := freecache.NewCache(entries * 200) //allocate entries * 200 bytes
|
||||
for i := 0; i < entries; i++ {
|
||||
key, val := generateKeyValue(i, valueSize)
|
||||
if err := freeCache.Set([]byte(key), val, 0); err != nil {
|
||||
fmt.Println("Error in set: ", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
firstKey, _ = generateKeyValue(1, valueSize)
|
||||
checkFirstElement(freeCache.Get([]byte(firstKey)))
|
||||
|
||||
if freeCache.OverwriteCount() != 0 {
|
||||
fmt.Println("Overwritten: ", freeCache.OverwriteCount())
|
||||
}
|
||||
fmt.Println("GC pause for freecache: ", gcPause())
|
||||
freeCache = nil
|
||||
gcPause()
|
||||
|
||||
//------------------------------------------
|
||||
|
||||
mapCache := make(map[string][]byte)
|
||||
for i := 0; i < entries; i++ {
|
||||
key, val := generateKeyValue(i, valueSize)
|
||||
mapCache[key] = val
|
||||
}
|
||||
fmt.Println("GC pause for map: ", gcPause())
|
||||
|
||||
}
|
||||
|
||||
func checkFirstElement(val []byte, err error) {
|
||||
_, expectedVal := generateKeyValue(1, valueSize)
|
||||
if err != nil {
|
||||
fmt.Println("Error in get: ", err.Error())
|
||||
} else if string(val) != string(expectedVal) {
|
||||
fmt.Println("Wrong first element: ", string(val))
|
||||
}
|
||||
}
|
||||
|
||||
func generateKeyValue(index int, valSize int) (string, []byte) {
|
||||
key := fmt.Sprintf("key-%010d", index)
|
||||
fixedNumber := []byte(fmt.Sprintf("%010d", index))
|
||||
val := append(make([]byte, valSize-10), fixedNumber...)
|
||||
|
||||
return key, val
|
||||
}
|
14
vendor/github.com/allegro/bigcache/clock.go
generated
vendored
14
vendor/github.com/allegro/bigcache/clock.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "time"
|
||||
|
||||
type clock interface {
|
||||
epoch() int64
|
||||
}
|
||||
|
||||
type systemClock struct {
|
||||
}
|
||||
|
||||
func (c systemClock) epoch() int64 {
|
||||
return time.Now().Unix()
|
||||
}
|
67
vendor/github.com/allegro/bigcache/config.go
generated
vendored
67
vendor/github.com/allegro/bigcache/config.go
generated
vendored
@ -1,67 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "time"
|
||||
|
||||
// Config for BigCache
|
||||
type Config struct {
|
||||
// Number of cache shards, value must be a power of two
|
||||
Shards int
|
||||
// Time after which entry can be evicted
|
||||
LifeWindow time.Duration
|
||||
// Interval between removing expired entries (clean up).
|
||||
// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
|
||||
CleanWindow time.Duration
|
||||
// Max number of entries in life window. Used only to calculate initial size for cache shards.
|
||||
// When proper value is set then additional memory allocation does not occur.
|
||||
MaxEntriesInWindow int
|
||||
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
|
||||
MaxEntrySize int
|
||||
// Verbose mode prints information about new memory allocation
|
||||
Verbose bool
|
||||
// Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used.
|
||||
Hasher Hasher
|
||||
// HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
|
||||
// It can protect application from consuming all available memory on machine, therefore from running OOM Killer.
|
||||
// Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
|
||||
// the oldest entries are overridden for the new ones.
|
||||
HardMaxCacheSize int
|
||||
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry. Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
OnRemove func(key string, entry []byte)
|
||||
|
||||
// Logger is a logging interface and used in combination with `Verbose`
|
||||
// Defaults to `DefaultLogger()`
|
||||
Logger Logger
|
||||
}
|
||||
|
||||
// DefaultConfig initializes config with default values.
|
||||
// When load for BigCache can be predicted in advance then it is better to use custom config.
|
||||
func DefaultConfig(eviction time.Duration) Config {
|
||||
return Config{
|
||||
Shards: 1024,
|
||||
LifeWindow: eviction,
|
||||
CleanWindow: 0,
|
||||
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||
MaxEntrySize: 500,
|
||||
Verbose: true,
|
||||
Hasher: newDefaultHasher(),
|
||||
HardMaxCacheSize: 0,
|
||||
Logger: DefaultLogger(),
|
||||
}
|
||||
}
|
||||
|
||||
// initialShardSize computes initial shard size
|
||||
func (c Config) initialShardSize() int {
|
||||
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
|
||||
}
|
||||
|
||||
// maximumShardSize computes maximum shard size
|
||||
func (c Config) maximumShardSize() int {
|
||||
maxShardSize := 0
|
||||
|
||||
if c.HardMaxCacheSize > 0 {
|
||||
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
|
||||
}
|
||||
|
||||
return maxShardSize
|
||||
}
|
70
vendor/github.com/allegro/bigcache/encoding.go
generated
vendored
70
vendor/github.com/allegro/bigcache/encoding.go
generated
vendored
@ -1,70 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
timestampSizeInBytes = 8 // Number of bytes used for timestamp
|
||||
hashSizeInBytes = 8 // Number of bytes used for hash
|
||||
keySizeInBytes = 2 // Number of bytes used for size of entry key
|
||||
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
|
||||
)
|
||||
|
||||
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
|
||||
keyLength := len(key)
|
||||
blobLength := len(entry) + headersSizeInBytes + keyLength
|
||||
|
||||
if blobLength > len(*buffer) {
|
||||
*buffer = make([]byte, blobLength)
|
||||
}
|
||||
blob := *buffer
|
||||
|
||||
binary.LittleEndian.PutUint64(blob, timestamp)
|
||||
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
|
||||
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
|
||||
copy(blob[headersSizeInBytes:], key)
|
||||
copy(blob[headersSizeInBytes+keyLength:], entry)
|
||||
|
||||
return blob[:blobLength]
|
||||
}
|
||||
|
||||
func readEntry(data []byte) []byte {
|
||||
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||
|
||||
// copy on read
|
||||
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
|
||||
copy(dst, data[headersSizeInBytes+length:])
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
func readTimestampFromEntry(data []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(data)
|
||||
}
|
||||
|
||||
func readKeyFromEntry(data []byte) string {
|
||||
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||
|
||||
// copy on read
|
||||
dst := make([]byte, length)
|
||||
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
|
||||
|
||||
return bytesToString(dst)
|
||||
}
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
|
||||
return *(*string)(unsafe.Pointer(&strHeader))
|
||||
}
|
||||
|
||||
func readHashFromEntry(data []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
|
||||
}
|
||||
|
||||
func resetKeyFromEntry(data []byte) {
|
||||
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
|
||||
}
|
46
vendor/github.com/allegro/bigcache/encoding_test.go
generated
vendored
46
vendor/github.com/allegro/bigcache/encoding_test.go
generated
vendored
@ -1,46 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEncodeDecode(t *testing.T) {
|
||||
// given
|
||||
now := uint64(time.Now().Unix())
|
||||
hash := uint64(42)
|
||||
key := "key"
|
||||
data := []byte("data")
|
||||
buffer := make([]byte, 100)
|
||||
|
||||
// when
|
||||
wrapped := wrapEntry(now, hash, key, data, &buffer)
|
||||
|
||||
// then
|
||||
assert.Equal(t, key, readKeyFromEntry(wrapped))
|
||||
assert.Equal(t, hash, readHashFromEntry(wrapped))
|
||||
assert.Equal(t, now, readTimestampFromEntry(wrapped))
|
||||
assert.Equal(t, data, readEntry(wrapped))
|
||||
assert.Equal(t, 100, len(buffer))
|
||||
}
|
||||
|
||||
func TestAllocateBiggerBuffer(t *testing.T) {
|
||||
//given
|
||||
now := uint64(time.Now().Unix())
|
||||
hash := uint64(42)
|
||||
key := "1"
|
||||
data := []byte("2")
|
||||
buffer := make([]byte, 1)
|
||||
|
||||
// when
|
||||
wrapped := wrapEntry(now, hash, key, data, &buffer)
|
||||
|
||||
// then
|
||||
assert.Equal(t, key, readKeyFromEntry(wrapped))
|
||||
assert.Equal(t, hash, readHashFromEntry(wrapped))
|
||||
assert.Equal(t, now, readTimestampFromEntry(wrapped))
|
||||
assert.Equal(t, data, readEntry(wrapped))
|
||||
assert.Equal(t, 2+headersSizeInBytes, len(buffer))
|
||||
}
|
17
vendor/github.com/allegro/bigcache/entry_not_found_error.go
generated
vendored
17
vendor/github.com/allegro/bigcache/entry_not_found_error.go
generated
vendored
@ -1,17 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "fmt"
|
||||
|
||||
// EntryNotFoundError is an error type struct which is returned when entry was not found for provided key
|
||||
type EntryNotFoundError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func notFound(key string) error {
|
||||
return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
|
||||
}
|
||||
|
||||
// Error returned when entry does not exist.
|
||||
func (e EntryNotFoundError) Error() string {
|
||||
return e.message
|
||||
}
|
28
vendor/github.com/allegro/bigcache/fnv.go
generated
vendored
28
vendor/github.com/allegro/bigcache/fnv.go
generated
vendored
@ -1,28 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
|
||||
// Its Sum64 method will lay the value out in big-endian byte order.
|
||||
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
|
||||
func newDefaultHasher() Hasher {
|
||||
return fnv64a{}
|
||||
}
|
||||
|
||||
type fnv64a struct{}
|
||||
|
||||
const (
|
||||
// offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
|
||||
offset64 = 14695981039346656037
|
||||
// prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// Sum64 gets the string and returns its uint64 hash value.
|
||||
func (f fnv64a) Sum64(key string) uint64 {
|
||||
var hash uint64 = offset64
|
||||
for i := 0; i < len(key); i++ {
|
||||
hash ^= uint64(key[i])
|
||||
hash *= prime64
|
||||
}
|
||||
|
||||
return hash
|
||||
}
|
18
vendor/github.com/allegro/bigcache/fnv_bench_test.go
generated
vendored
18
vendor/github.com/allegro/bigcache/fnv_bench_test.go
generated
vendored
@ -1,18 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "testing"
|
||||
|
||||
var text = "abcdefg"
|
||||
|
||||
func BenchmarkFnvHashSum64(b *testing.B) {
|
||||
h := newDefaultHasher()
|
||||
for i := 0; i < b.N; i++ {
|
||||
h.Sum64(text)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFnvHashStdLibSum64(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
stdLibFnvSum64(text)
|
||||
}
|
||||
}
|
35
vendor/github.com/allegro/bigcache/fnv_test.go
generated
vendored
35
vendor/github.com/allegro/bigcache/fnv_test.go
generated
vendored
@ -1,35 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"hash/fnv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
text string
|
||||
expectedHash uint64
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
{"", stdLibFnvSum64("")},
|
||||
{"a", stdLibFnvSum64("a")},
|
||||
{"ab", stdLibFnvSum64("ab")},
|
||||
{"abc", stdLibFnvSum64("abc")},
|
||||
{"some longer and more complicated text", stdLibFnvSum64("some longer and more complicated text")},
|
||||
}
|
||||
|
||||
func TestFnvHashSum64(t *testing.T) {
|
||||
h := newDefaultHasher()
|
||||
for _, testCase := range testCases {
|
||||
hashed := h.Sum64(testCase.text)
|
||||
if hashed != testCase.expectedHash {
|
||||
t.Errorf("hash(%q) = %d want %d", testCase.text, hashed, testCase.expectedHash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func stdLibFnvSum64(key string) uint64 {
|
||||
h := fnv.New64a()
|
||||
h.Write([]byte(key))
|
||||
return h.Sum64()
|
||||
}
|
8
vendor/github.com/allegro/bigcache/hash.go
generated
vendored
8
vendor/github.com/allegro/bigcache/hash.go
generated
vendored
@ -1,8 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions
|
||||
// (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e.
|
||||
// you can use FarmHash family).
|
||||
type Hasher interface {
|
||||
Sum64(string) uint64
|
||||
}
|
7
vendor/github.com/allegro/bigcache/hash_test.go
generated
vendored
7
vendor/github.com/allegro/bigcache/hash_test.go
generated
vendored
@ -1,7 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
type hashStub uint64
|
||||
|
||||
func (stub hashStub) Sum64(_ string) uint64 {
|
||||
return uint64(stub)
|
||||
}
|
122
vendor/github.com/allegro/bigcache/iterator.go
generated
vendored
122
vendor/github.com/allegro/bigcache/iterator.go
generated
vendored
@ -1,122 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import "sync"
|
||||
|
||||
type iteratorError string
|
||||
|
||||
func (e iteratorError) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
|
||||
// ErrInvalidIteratorState is reported when iterator is in invalid state
|
||||
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
|
||||
|
||||
// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying
|
||||
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
|
||||
|
||||
var emptyEntryInfo = EntryInfo{}
|
||||
|
||||
// EntryInfo holds informations about entry in the cache
|
||||
type EntryInfo struct {
|
||||
timestamp uint64
|
||||
hash uint64
|
||||
key string
|
||||
value []byte
|
||||
}
|
||||
|
||||
// Key returns entry's underlying key
|
||||
func (e EntryInfo) Key() string {
|
||||
return e.key
|
||||
}
|
||||
|
||||
// Hash returns entry's hash value
|
||||
func (e EntryInfo) Hash() uint64 {
|
||||
return e.hash
|
||||
}
|
||||
|
||||
// Timestamp returns entry's timestamp (time of insertion)
|
||||
func (e EntryInfo) Timestamp() uint64 {
|
||||
return e.timestamp
|
||||
}
|
||||
|
||||
// Value returns entry's underlying value
|
||||
func (e EntryInfo) Value() []byte {
|
||||
return e.value
|
||||
}
|
||||
|
||||
// EntryInfoIterator allows to iterate over entries in the cache
|
||||
type EntryInfoIterator struct {
|
||||
mutex sync.Mutex
|
||||
cache *BigCache
|
||||
currentShard int
|
||||
currentIndex int
|
||||
elements []uint32
|
||||
elementsCount int
|
||||
valid bool
|
||||
}
|
||||
|
||||
// SetNext moves to next element and returns true if it exists.
|
||||
func (it *EntryInfoIterator) SetNext() bool {
|
||||
it.mutex.Lock()
|
||||
|
||||
it.valid = false
|
||||
it.currentIndex++
|
||||
|
||||
if it.elementsCount > it.currentIndex {
|
||||
it.valid = true
|
||||
it.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
|
||||
it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
|
||||
|
||||
// Non empty shard - stick with it
|
||||
if it.elementsCount > 0 {
|
||||
it.currentIndex = 0
|
||||
it.currentShard = i
|
||||
it.valid = true
|
||||
it.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
}
|
||||
it.mutex.Unlock()
|
||||
return false
|
||||
}
|
||||
|
||||
func newIterator(cache *BigCache) *EntryInfoIterator {
|
||||
elements, count := cache.shards[0].copyKeys()
|
||||
|
||||
return &EntryInfoIterator{
|
||||
cache: cache,
|
||||
currentShard: 0,
|
||||
currentIndex: -1,
|
||||
elements: elements,
|
||||
elementsCount: count,
|
||||
}
|
||||
}
|
||||
|
||||
// Value returns current value from the iterator
|
||||
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
|
||||
it.mutex.Lock()
|
||||
|
||||
if !it.valid {
|
||||
it.mutex.Unlock()
|
||||
return emptyEntryInfo, ErrInvalidIteratorState
|
||||
}
|
||||
|
||||
entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
|
||||
|
||||
if err != nil {
|
||||
it.mutex.Unlock()
|
||||
return emptyEntryInfo, ErrCannotRetrieveEntry
|
||||
}
|
||||
it.mutex.Unlock()
|
||||
|
||||
return EntryInfo{
|
||||
timestamp: readTimestampFromEntry(entry),
|
||||
hash: readHashFromEntry(entry),
|
||||
key: readKeyFromEntry(entry),
|
||||
value: readEntry(entry),
|
||||
}, nil
|
||||
}
|
150
vendor/github.com/allegro/bigcache/iterator_test.go
generated
vendored
150
vendor/github.com/allegro/bigcache/iterator_test.go
generated
vendored
@ -1,150 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestEntriesIterator verifies that iterating a populated multi-shard cache
// visits every stored key exactly once.
func TestEntriesIterator(t *testing.T) {
	t.Parallel()

	// given
	keysCount := 1000
	cache, _ := NewBigCache(Config{
		Shards:             8,
		LifeWindow:         6 * time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	})
	value := []byte("value")

	for i := 0; i < keysCount; i++ {
		cache.Set(fmt.Sprintf("key%d", i), value)
	}

	// when
	keys := make(map[string]struct{})
	iterator := cache.Iterator()

	for iterator.SetNext() {
		current, err := iterator.Value()

		if err == nil {
			keys[current.Key()] = struct{}{}
		}
	}

	// then
	assert.Equal(t, keysCount, len(keys))
}

// TestEntriesIteratorWithMostShardsEmpty checks that the iterator skips
// empty shards and still surfaces the single stored entry with its full
// metadata (key, hash, value, timestamp).
func TestEntriesIteratorWithMostShardsEmpty(t *testing.T) {
	t.Parallel()

	// given
	clock := mockedClock{value: 0}
	cache, _ := newBigCache(Config{
		Shards:             8,
		LifeWindow:         6 * time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	}, &clock)

	cache.Set("key", []byte("value"))

	// when
	iterator := cache.Iterator()

	// then
	if !iterator.SetNext() {
		t.Errorf("Iterator should contain at least single element")
	}

	current, err := iterator.Value()

	// then
	assert.Nil(t, err)
	assert.Equal(t, "key", current.Key())
	assert.Equal(t, uint64(0x3dc94a19365b10ec), current.Hash())
	assert.Equal(t, []byte("value"), current.Value())
	assert.Equal(t, uint64(0), current.Timestamp())
}

// TestEntriesIteratorWithConcurrentUpdate checks that Value reports
// ErrCannotRetrieveEntry when the entry was evicted after SetNext.
func TestEntriesIteratorWithConcurrentUpdate(t *testing.T) {
	t.Parallel()

	// given
	cache, _ := NewBigCache(Config{
		Shards:             1,
		LifeWindow:         time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	})

	cache.Set("key", []byte("value"))

	// when
	iterator := cache.Iterator()

	// then
	if !iterator.SetNext() {
		t.Errorf("Iterator should contain at least single element")
	}

	// Quite ugly but works
	for i := 0; i < cache.config.Shards; i++ {
		if oldestEntry, err := cache.shards[i].getOldestEntry(); err == nil {
			cache.onEvict(oldestEntry, 10, cache.shards[i].removeOldestEntry)
		}
	}

	current, err := iterator.Value()

	// then
	assert.Equal(t, ErrCannotRetrieveEntry, err)
	assert.Equal(t, "Could not retrieve entry from cache", err.Error())
	assert.Equal(t, EntryInfo{}, current)
}

// TestEntriesIteratorWithAllShardsEmpty checks that SetNext is false on an
// empty cache.
func TestEntriesIteratorWithAllShardsEmpty(t *testing.T) {
	t.Parallel()

	// given
	cache, _ := NewBigCache(Config{
		Shards:             1,
		LifeWindow:         time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	})

	// when
	iterator := cache.Iterator()

	// then
	if iterator.SetNext() {
		t.Errorf("Iterator should not contain any elements")
	}
}

// TestEntriesIteratorInInvalidState checks that Value called before any
// SetNext reports ErrInvalidIteratorState.
func TestEntriesIteratorInInvalidState(t *testing.T) {
	t.Parallel()

	// given
	cache, _ := NewBigCache(Config{
		Shards:             1,
		LifeWindow:         time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	})

	// when
	iterator := cache.Iterator()

	// then
	_, err := iterator.Value()
	assert.Equal(t, ErrInvalidIteratorState, err)
	assert.Equal(t, "Iterator is in invalid state. Use SetNext() to move to next position", err.Error())
}
|
30
vendor/github.com/allegro/bigcache/logger.go
generated
vendored
30
vendor/github.com/allegro/bigcache/logger.go
generated
vendored
@ -1,30 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Logger is invoked when `Config.Verbose=true`
|
||||
type Logger interface {
|
||||
Printf(format string, v ...interface{})
|
||||
}
|
||||
|
||||
// this is a safeguard, breaking on compile time in case
|
||||
// `log.Logger` does not adhere to our `Logger` interface.
|
||||
// see https://golang.org/doc/faq#guarantee_satisfies_interface
|
||||
var _ Logger = &log.Logger{}
|
||||
|
||||
// DefaultLogger returns a `Logger` implementation
|
||||
// backed by stdlib's log
|
||||
func DefaultLogger() *log.Logger {
|
||||
return log.New(os.Stdout, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
func newLogger(custom Logger) Logger {
|
||||
if custom != nil {
|
||||
return custom
|
||||
}
|
||||
|
||||
return DefaultLogger()
|
||||
}
|
210
vendor/github.com/allegro/bigcache/queue/bytes_queue.go
generated
vendored
210
vendor/github.com/allegro/bigcache/queue/bytes_queue.go
generated
vendored
@ -1,210 +0,0 @@
|
||||
package queue
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// Number of bytes used to keep information about entry size
	headerEntrySize = 4
	// Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index
	leftMarginIndex = 1
	// Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation.
	// It keeps entries indexes unchanged
	minimumEmptyBlobSize = 32 + headerEntrySize
)

// BytesQueue is a non-thread safe queue type of fifo based on bytes array.
// For every push operation index of entry is returned. It can be used to read the entry later
type BytesQueue struct {
	array           []byte // backing storage holding [4-byte header|payload] records
	capacity        int    // current size of array in bytes
	maxCapacity     int    // upper bound for capacity growth; 0 means unbounded
	head            int    // index of the oldest entry's header
	tail            int    // index where the next entry will be written
	count           int    // number of entries currently held
	rightMargin     int    // first byte past the right-most entry
	headerBuffer    []byte // scratch space for encoding the size header
	verbose         bool   // log re-allocations when true
	initialCapacity int    // capacity the queue was created with
}

// queueError carries the queue's error messages.
type queueError struct {
	message string
}

// NewBytesQueue initialize new bytes queue.
// Initial capacity is used in bytes array allocation
// When verbose flag is set then information about memory allocation are printed
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
	return &BytesQueue{
		array:           make([]byte, initialCapacity),
		capacity:        initialCapacity,
		maxCapacity:     maxCapacity,
		headerBuffer:    make([]byte, headerEntrySize),
		tail:            leftMarginIndex,
		head:            leftMarginIndex,
		rightMargin:     leftMarginIndex,
		verbose:         verbose,
		initialCapacity: initialCapacity,
	}
}

// Reset removes all entries from queue
func (q *BytesQueue) Reset() {
	// Just reset indexes
	q.tail = leftMarginIndex
	q.head = leftMarginIndex
	q.rightMargin = leftMarginIndex
	q.count = 0
}

// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
// Returns index for pushed data or error if maximum size queue limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
	dataLen := len(data)

	if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
		if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
			// Wrap around: reuse space freed by Pop at the front of the array.
			q.tail = leftMarginIndex
		} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
			return -1, &queueError{"Full queue. Maximum size limit reached."}
		} else {
			q.allocateAdditionalMemory(dataLen + headerEntrySize)
		}
	}

	index := q.tail

	q.push(data, dataLen)

	return index, nil
}

// allocateAdditionalMemory grows the backing array (at least doubling it,
// capped at maxCapacity) and re-linearises existing entries so previously
// returned indexes remain valid.
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
	start := time.Now()
	if q.capacity < minimum {
		q.capacity += minimum
	}
	q.capacity = q.capacity * 2
	if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
		q.capacity = q.maxCapacity
	}

	oldArray := q.array
	q.array = make([]byte, q.capacity)

	if leftMarginIndex != q.rightMargin {
		copy(q.array, oldArray[:q.rightMargin])

		if q.tail < q.head {
			// Queue had wrapped: plug the gap between tail and head with an
			// empty blob so existing entry indexes stay unchanged.
			emptyBlobLen := q.head - q.tail - headerEntrySize
			q.push(make([]byte, emptyBlobLen), emptyBlobLen)
			q.head = leftMarginIndex
			q.tail = q.rightMargin
		}
	}

	if q.verbose {
		log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
	}
}

// push writes the little-endian length header followed by the payload at
// tail and updates rightMargin/count.
func (q *BytesQueue) push(data []byte, len int) {
	binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
	q.copy(q.headerBuffer, headerEntrySize)

	q.copy(data, len)

	if q.tail > q.head {
		q.rightMargin = q.tail
	}

	q.count++
}

// copy appends len bytes of data at the tail and advances it.
func (q *BytesQueue) copy(data []byte, len int) {
	q.tail += copy(q.array[q.tail:], data[:len])
}

// Pop reads the oldest entry from queue and moves head pointer to the next one
func (q *BytesQueue) Pop() ([]byte, error) {
	data, size, err := q.peek(q.head)
	if err != nil {
		return nil, err
	}

	q.head += headerEntrySize + size
	q.count--

	if q.head == q.rightMargin {
		// Everything up to the right margin was consumed: collapse indexes
		// back towards the left margin.
		q.head = leftMarginIndex
		if q.tail == q.rightMargin {
			q.tail = leftMarginIndex
		}
		q.rightMargin = q.tail
	}

	return data, nil
}

// Peek reads the oldest entry from list without moving head pointer
func (q *BytesQueue) Peek() ([]byte, error) {
	data, _, err := q.peek(q.head)
	return data, err
}

// Get reads entry from index
func (q *BytesQueue) Get(index int) ([]byte, error) {
	data, _, err := q.peek(index)
	return data, err
}

// Capacity returns number of allocated bytes for queue
func (q *BytesQueue) Capacity() int {
	return q.capacity
}

// Len returns number of entries kept in queue
func (q *BytesQueue) Len() int {
	return q.count
}

// Error returns error message
func (e *queueError) Error() string {
	return e.message
}

// peek decodes the entry whose header starts at index. The returned slice
// aliases the backing array (no copy is made).
func (q *BytesQueue) peek(index int) ([]byte, int, error) {

	if q.count == 0 {
		return nil, 0, &queueError{"Empty queue"}
	}

	if index <= 0 {
		return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
	}

	if index+headerEntrySize >= len(q.array) {
		return nil, 0, &queueError{"Index out of range"}
	}

	blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
	return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
}

// availableSpaceAfterTail reports usable free bytes past the tail.
func (q *BytesQueue) availableSpaceAfterTail() int {
	if q.tail >= q.head {
		return q.capacity - q.tail
	}
	return q.head - q.tail - minimumEmptyBlobSize
}

// availableSpaceBeforeHead reports usable free bytes before the head.
func (q *BytesQueue) availableSpaceBeforeHead() int {
	if q.tail >= q.head {
		return q.head - leftMarginIndex - minimumEmptyBlobSize
	}
	return q.head - q.tail - minimumEmptyBlobSize
}
|
365
vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go
generated
vendored
365
vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go
generated
vendored
@ -1,365 +0,0 @@
|
||||
package queue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestPushAndPop checks the basic FIFO round trip and the empty-queue error.
func TestPushAndPop(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(10, 0, true)
	entry := []byte("hello")

	// when
	_, err := queue.Pop()

	// then
	assert.EqualError(t, err, "Empty queue")

	// when
	queue.Push(entry)

	// then
	assert.Equal(t, entry, pop(queue))
}

// TestLen checks the entry counter after a push.
func TestLen(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)
	entry := []byte("hello")
	assert.Zero(t, queue.Len())

	// when
	queue.Push(entry)

	// then
	assert.Equal(t, queue.Len(), 1)
}

// TestPeek checks that Peek returns the oldest entry without consuming it.
func TestPeek(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)
	entry := []byte("hello")

	// when
	read, err := queue.Peek()

	// then
	assert.EqualError(t, err, "Empty queue")
	assert.Nil(t, read)

	// when
	queue.Push(entry)
	read, err = queue.Peek()

	// then
	assert.NoError(t, err)
	assert.Equal(t, pop(queue), read)
	assert.Equal(t, entry, read)
}

// TestReset checks that Reset empties the queue and it remains usable after.
func TestReset(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)
	entry := []byte("hello")

	// when
	queue.Push(entry)
	queue.Push(entry)
	queue.Push(entry)

	queue.Reset()
	read, err := queue.Peek()

	// then
	assert.EqualError(t, err, "Empty queue")
	assert.Nil(t, read)

	// when
	queue.Push(entry)
	read, err = queue.Peek()

	// then
	assert.NoError(t, err)
	assert.Equal(t, pop(queue), read)
	assert.Equal(t, entry, read)

	// when
	read, err = queue.Peek()

	// then
	assert.EqualError(t, err, "Empty queue")
	assert.Nil(t, read)
}

// TestReuseAvailableSpace checks the wrap-around: a push after a pop reuses
// the freed space at the front instead of growing the array.
func TestReuseAvailableSpace(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)

	// when
	queue.Push(blob('a', 70))
	queue.Push(blob('b', 20))
	queue.Pop()
	queue.Push(blob('c', 20))

	// then
	assert.Equal(t, 100, queue.Capacity())
	assert.Equal(t, blob('b', 20), pop(queue))
}

// TestAllocateAdditionalSpace checks that capacity doubles when the initial
// allocation is exhausted.
func TestAllocateAdditionalSpace(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(11, 0, false)

	// when
	queue.Push([]byte("hello1"))
	queue.Push([]byte("hello2"))

	// then
	assert.Equal(t, 22, queue.Capacity())
}

// TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail
// checks growth when total free space would suffice but is fragmented.
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereHeadIsBeforeTail(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(25, 0, false)

	// when
	queue.Push(blob('a', 3)) // header + entry + left margin = 8 bytes
	queue.Push(blob('b', 6)) // additional 10 bytes
	queue.Pop()              // space freed, 7 bytes available at the beginning
	queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory

	// then
	assert.Equal(t, 50, queue.Capacity())
	assert.Equal(t, blob('b', 6), pop(queue))
	assert.Equal(t, blob('c', 6), pop(queue))
}

// TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail
// checks that indexes returned by Push survive a re-allocation.
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereHeadIsBeforeTail(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(25, 0, false)

	// when
	queue.Push(blob('a', 3))                   // header + entry + left margin = 8 bytes
	index, _ := queue.Push(blob('b', 6))       // additional 10 bytes
	queue.Pop()                                // space freed, 7 bytes available at the beginning
	newestIndex, _ := queue.Push(blob('c', 6)) // 10 bytes needed, 14 available but not in one segment, allocate additional memory

	// then
	assert.Equal(t, 50, queue.Capacity())
	assert.Equal(t, blob('b', 6), get(queue, index))
	assert.Equal(t, blob('c', 6), get(queue, newestIndex))
}

// TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead
// checks growth while the queue is in a wrapped state (tail before head);
// the gap is filled with an empty blob to preserve indexes.
func TestAllocateAdditionalSpaceForInsufficientFreeFragmentedSpaceWhereTailIsBeforeHead(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)

	// when
	queue.Push(blob('a', 70)) // header + entry + left margin = 75 bytes
	queue.Push(blob('b', 10)) // 75 + 10 + 4 = 89 bytes
	queue.Pop()               // space freed at the beginning
	queue.Push(blob('c', 30)) // 34 bytes used at the beginning, tail pointer is before head pointer
	queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory

	// then
	assert.Equal(t, 200, queue.Capacity())
	assert.Equal(t, blob('c', 30), pop(queue))
	// empty blob fills space between tail and head,
	// created when additional memory was allocated,
	// it keeps current entries indexes unchanged
	assert.Equal(t, blob(0, 36), pop(queue))
	assert.Equal(t, blob('b', 10), pop(queue))
	assert.Equal(t, blob('d', 40), pop(queue))
}

// TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead
// checks index stability across a re-allocation in the wrapped state.
func TestUnchangedEntriesIndexesAfterAdditionalMemoryAllocationWhereTailIsBeforeHead(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(100, 0, false)

	// when
	queue.Push(blob('a', 70))                   // header + entry + left margin = 75 bytes
	index, _ := queue.Push(blob('b', 10))       // 75 + 10 + 4 = 89 bytes
	queue.Pop()                                 // space freed at the beginning
	queue.Push(blob('c', 30))                   // 34 bytes used at the beginning, tail pointer is before head pointer
	newestIndex, _ := queue.Push(blob('d', 40)) // 44 bytes needed but no available in one segment, allocate new memory

	// then
	assert.Equal(t, 200, queue.Capacity())
	assert.Equal(t, blob('b', 10), get(queue, index))
	assert.Equal(t, blob('d', 40), get(queue, newestIndex))
}

// TestAllocateAdditionalSpaceForValueBiggerThanInitQueue checks growth for a
// single entry larger than the whole initial allocation.
func TestAllocateAdditionalSpaceForValueBiggerThanInitQueue(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(11, 0, false)

	// when
	queue.Push(blob('a', 100))

	// then
	assert.Equal(t, blob('a', 100), pop(queue))
	assert.Equal(t, 230, queue.Capacity())
}

// TestAllocateAdditionalSpaceForValueBiggerThanQueue checks growth for an
// entry larger than the current (already used) queue.
func TestAllocateAdditionalSpaceForValueBiggerThanQueue(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(21, 0, false)

	// when
	queue.Push(make([]byte, 2))
	queue.Push(make([]byte, 2))
	queue.Push(make([]byte, 100))

	// then
	queue.Pop()
	queue.Pop()
	assert.Equal(t, make([]byte, 100), pop(queue))
	assert.Equal(t, 250, queue.Capacity())
}

// TestPopWholeQueue checks that fully draining the queue resets its
// pointers so it can be refilled without growing.
func TestPopWholeQueue(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(13, 0, false)

	// when
	queue.Push([]byte("a"))
	queue.Push([]byte("b"))
	queue.Pop()
	queue.Pop()
	queue.Push([]byte("c"))

	// then
	assert.Equal(t, 13, queue.Capacity())
	assert.Equal(t, []byte("c"), pop(queue))
}

// TestGetEntryFromIndex checks random access through an index returned by Push.
func TestGetEntryFromIndex(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(20, 0, false)

	// when
	queue.Push([]byte("a"))
	index, _ := queue.Push([]byte("b"))
	queue.Push([]byte("c"))
	result, _ := queue.Get(index)

	// then
	assert.Equal(t, []byte("b"), result)
}

// TestGetEntryFromInvalidIndex checks the error for a non-positive index.
func TestGetEntryFromInvalidIndex(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(1, 0, false)
	queue.Push([]byte("a"))

	// when
	result, err := queue.Get(0)

	// then
	assert.Nil(t, result)
	assert.EqualError(t, err, "Index must be grater than zero. Invalid index.")
}

// TestGetEntryFromIndexOutOfRange checks the error for an index past the array.
func TestGetEntryFromIndexOutOfRange(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(1, 0, false)
	queue.Push([]byte("a"))

	// when
	result, err := queue.Get(42)

	// then
	assert.Nil(t, result)
	assert.EqualError(t, err, "Index out of range")
}

// TestGetEntryFromEmptyQueue checks the error when no entries are stored.
func TestGetEntryFromEmptyQueue(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(13, 0, false)

	// when
	result, err := queue.Get(1)

	// then
	assert.Nil(t, result)
	assert.EqualError(t, err, "Empty queue")
}

// TestMaxSizeLimit checks that growth stops at maxCapacity and further
// pushes fail with the full-queue error.
func TestMaxSizeLimit(t *testing.T) {
	t.Parallel()

	// given
	queue := NewBytesQueue(30, 50, false)

	// when
	queue.Push(blob('a', 25))
	queue.Push(blob('b', 5))
	capacity := queue.Capacity()
	_, err := queue.Push(blob('c', 15))

	// then
	assert.Equal(t, 50, capacity)
	assert.EqualError(t, err, "Full queue. Maximum size limit reached.")
	assert.Equal(t, blob('a', 25), pop(queue))
	assert.Equal(t, blob('b', 5), pop(queue))
}

// pop is a test helper: Pop that panics on error.
func pop(queue *BytesQueue) []byte {
	entry, err := queue.Pop()
	if err != nil {
		panic(err)
	}
	return entry
}

// get is a test helper: Get that panics on error.
func get(queue *BytesQueue, index int) []byte {
	entry, err := queue.Get(index)
	if err != nil {
		panic(err)
	}
	return entry
}

// blob builds a byte slice of the given length filled with char.
func blob(char byte, len int) []byte {
	b := make([]byte, len)
	for index := range b {
		b[index] = char
	}
	return b
}
|
105
vendor/github.com/allegro/bigcache/server/README.md
generated
vendored
105
vendor/github.com/allegro/bigcache/server/README.md
generated
vendored
@ -1,105 +0,0 @@
|
||||
# BigCache HTTP Server
|
||||
|
||||
This is a basic HTTP server implementation for BigCache. It has a simple RESTful API and is designed for easy operational deployments. This server is intended to be consumed as a standalone executable, for things like Cloud Foundry, Heroku, etc. A design goal is versatility, so if you want to cache pictures, software artifacts, text, or any other kind of binary data, the BigCache HTTP Server should fit your needs.
|
||||
|
||||
```bash
|
||||
# cache API.
|
||||
GET /api/v1/cache/{key}
|
||||
PUT /api/v1/cache/{key}
|
||||
DELETE /api/v1/cache/{key}
|
||||
|
||||
# stats API.
|
||||
GET /api/v1/stats
|
||||
```
|
||||
|
||||
The cache API is designed for ease-of-use caching and accepts any content type. The stats API will return hit and miss statistics about the cache since the last time the server was started - they will reset whenever the server is restarted.
|
||||
|
||||
### Notes for Operators
|
||||
|
||||
1. No SSL support, currently.
|
||||
1. No authentication, currently.
|
||||
1. Stats from the stats API are not persistent.
|
||||
1. The easiest way to clean the cache is to restart the process; it takes less than a second to initialise.
|
||||
1. There is no replication or clustering.
|
||||
|
||||
### Command-line Interface
|
||||
|
||||
```powershell
|
||||
PS C:\go\src\github.com\mxplusb\bigcache\server> .\server.exe -h
|
||||
Usage of C:\go\src\github.com\mxplusb\bigcache\server\server.exe:
|
||||
-lifetime duration
|
||||
Lifetime of each cache object. (default 10m0s)
|
||||
-logfile string
|
||||
Location of the logfile.
|
||||
-max int
|
||||
Maximum amount of data in the cache in MB. (default 8192)
|
||||
-maxInWindow int
|
||||
Used only in initial memory allocation. (default 600000)
|
||||
-maxShardEntrySize int
|
||||
The maximum size of each object stored in a shard. Used only in initial memory allocation. (default 500)
|
||||
-port int
|
||||
The port to listen on. (default 9090)
|
||||
-shards int
|
||||
Number of shards for the cache. (default 1024)
|
||||
-v Verbose logging.
|
||||
-version
|
||||
Print server version.
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
$ curl -v -XPUT localhost:9090/api/v1/cache/example -d "yay!"
|
||||
* Trying 127.0.0.1...
|
||||
* Connected to localhost (127.0.0.1) port 9090 (#0)
|
||||
> PUT /api/v1/cache/example HTTP/1.1
|
||||
> Host: localhost:9090
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
> Content-Length: 4
|
||||
> Content-Type: application/x-www-form-urlencoded
|
||||
>
|
||||
* upload completely sent off: 4 out of 4 bytes
|
||||
< HTTP/1.1 201 Created
|
||||
< Date: Fri, 17 Nov 2017 03:50:07 GMT
|
||||
< Content-Length: 0
|
||||
< Content-Type: text/plain; charset=utf-8
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
$
|
||||
$ curl -v -XGET localhost:9090/api/v1/cache/example
|
||||
Note: Unnecessary use of -X or --request, GET is already inferred.
|
||||
* Trying 127.0.0.1...
|
||||
* Connected to localhost (127.0.0.1) port 9090 (#0)
|
||||
> GET /api/v1/cache/example HTTP/1.1
|
||||
> Host: localhost:9090
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Fri, 17 Nov 2017 03:50:23 GMT
|
||||
< Content-Length: 4
|
||||
< Content-Type: text/plain; charset=utf-8
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
yay!
|
||||
```
|
||||
|
||||
The server does log basic metrics:
|
||||
|
||||
```bash
|
||||
$ ./server
|
||||
2017/11/16 22:49:22 cache initialised.
|
||||
2017/11/16 22:49:22 starting server on :9090
|
||||
2017/11/16 22:50:07 stored "example" in cache.
|
||||
2017/11/16 22:50:07 request took 277000ns.
|
||||
2017/11/16 22:50:23 request took 9000ns.
|
||||
```
|
||||
|
||||
### Acquiring Natively
|
||||
|
||||
This is native Go with no external dependencies, so it will compile for all supported Golang platforms. To build:
|
||||
|
||||
```bash
|
||||
go build server.go
|
||||
```
|
87
vendor/github.com/allegro/bigcache/server/cache_handlers.go
generated
vendored
87
vendor/github.com/allegro/bigcache/server/cache_handlers.go
generated
vendored
@ -1,87 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func cacheIndexHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
getCacheHandler(w, r)
|
||||
case http.MethodPut:
|
||||
putCacheHandler(w, r)
|
||||
case http.MethodDelete:
|
||||
deleteCacheHandler(w, r)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// handles get requests.
|
||||
func getCacheHandler(w http.ResponseWriter, r *http.Request) {
|
||||
target := r.URL.Path[len(cachePath):]
|
||||
if target == "" {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte("can't get a key if there is no key."))
|
||||
log.Print("empty request.")
|
||||
return
|
||||
}
|
||||
entry, err := cache.Get(target)
|
||||
if err != nil {
|
||||
errMsg := (err).Error()
|
||||
if strings.Contains(errMsg, "not found") {
|
||||
log.Print(err)
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
log.Print(err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Write(entry)
|
||||
}
|
||||
|
||||
func putCacheHandler(w http.ResponseWriter, r *http.Request) {
|
||||
target := r.URL.Path[len(cachePath):]
|
||||
if target == "" {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
w.Write([]byte("can't put a key if there is no key."))
|
||||
log.Print("empty request.")
|
||||
return
|
||||
}
|
||||
|
||||
entry, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if err := cache.Set(target, []byte(entry)); err != nil {
|
||||
log.Print(err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Printf("stored \"%s\" in cache.", target)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
}
|
||||
|
||||
// delete cache objects.
|
||||
func deleteCacheHandler(w http.ResponseWriter, r *http.Request) {
|
||||
target := r.URL.Path[len(cachePath):]
|
||||
if err := cache.Delete(target); err != nil {
|
||||
if strings.Contains((err).Error(), "not found") {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
log.Printf("%s not found.", target)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
log.Printf("internal cache error: %s", err)
|
||||
}
|
||||
// this is what the RFC says to use when calling DELETE.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
29
vendor/github.com/allegro/bigcache/server/middleware.go
generated
vendored
29
vendor/github.com/allegro/bigcache/server/middleware.go
generated
vendored
@ -1,29 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// our base middleware implementation.
|
||||
type service func(http.Handler) http.Handler
|
||||
|
||||
// chain load middleware services.
|
||||
func serviceLoader(h http.Handler, svcs ...service) http.Handler {
|
||||
for _, svc := range svcs {
|
||||
h = svc(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// middleware for request length metrics.
|
||||
func requestMetrics(l *log.Logger) service {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
h.ServeHTTP(w, r)
|
||||
l.Printf("%s request to %s took %vns.", r.Method, r.URL.Path, time.Now().Sub(start).Nanoseconds())
|
||||
})
|
||||
}
|
||||
}
|
85
vendor/github.com/allegro/bigcache/server/server.go
generated
vendored
85
vendor/github.com/allegro/bigcache/server/server.go
generated
vendored
@ -1,85 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
)
|
||||
|
||||
const (
	// base HTTP paths.
	apiVersion  = "v1"
	apiBasePath = "/api/" + apiVersion + "/"

	// path to cache.
	cachePath = apiBasePath + "cache/"
	statsPath = apiBasePath + "stats"

	// server version.
	version = "1.0.0"
)

var (
	port    int    // TCP port the HTTP server listens on
	logfile string // optional logfile path; empty means stdout
	ver     bool   // when true, print the server version and exit

	// cache-specific settings.
	cache  *bigcache.BigCache
	config = bigcache.Config{}
)

// init registers every command-line flag; cache tunables are bound
// directly onto the bigcache config, the rest onto the server-level
// variables above.
func init() {
	flag.BoolVar(&config.Verbose, "v", false, "Verbose logging.")
	flag.IntVar(&config.Shards, "shards", 1024, "Number of shards for the cache.")
	flag.IntVar(&config.MaxEntriesInWindow, "maxInWindow", 1000*10*60, "Used only in initial memory allocation.")
	// 100000*100000*60 ns == 600s, i.e. the documented 10m default lifetime.
	flag.DurationVar(&config.LifeWindow, "lifetime", 100000*100000*60, "Lifetime of each cache object.")
	flag.IntVar(&config.HardMaxCacheSize, "max", 8192, "Maximum amount of data in the cache in MB.")
	flag.IntVar(&config.MaxEntrySize, "maxShardEntrySize", 500, "The maximum size of each object stored in a shard. Used only in initial memory allocation.")
	flag.IntVar(&port, "port", 9090, "The port to listen on.")
	flag.StringVar(&logfile, "logfile", "", "Location of the logfile.")
	flag.BoolVar(&ver, "version", false, "Print server version.")
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if ver {
|
||||
fmt.Printf("BigCache HTTP Server v%s", version)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
var logger *log.Logger
|
||||
|
||||
if logfile == "" {
|
||||
logger = log.New(os.Stdout, "", log.LstdFlags)
|
||||
} else {
|
||||
f, err := os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
logger = log.New(f, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
var err error
|
||||
cache, err = bigcache.NewBigCache(config)
|
||||
if err != nil {
|
||||
logger.Fatal(err)
|
||||
}
|
||||
|
||||
logger.Print("cache initialised.")
|
||||
|
||||
// let the middleware log.
|
||||
http.Handle(cachePath, serviceLoader(cacheIndexHandler(), requestMetrics(logger)))
|
||||
http.Handle(statsPath, serviceLoader(statsIndexHandler(), requestMetrics(logger)))
|
||||
|
||||
logger.Printf("starting server on :%d", port)
|
||||
|
||||
strPort := ":" + strconv.Itoa(port)
|
||||
log.Fatal("ListenAndServe: ", http.ListenAndServe(strPort, nil))
|
||||
}
|
185
vendor/github.com/allegro/bigcache/server/server_test.go
generated
vendored
185
vendor/github.com/allegro/bigcache/server/server_test.go
generated
vendored
@ -1,185 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/allegro/bigcache"
|
||||
)
|
||||
|
||||
const (
|
||||
testBaseString = "http://bigcache.org"
|
||||
)
|
||||
|
||||
func testCacheSetup() {
|
||||
cache, _ = bigcache.NewBigCache(bigcache.Config{
|
||||
Shards: 1024,
|
||||
LifeWindow: 10 * time.Minute,
|
||||
MaxEntriesInWindow: 1000 * 10 * 60,
|
||||
MaxEntrySize: 500,
|
||||
Verbose: true,
|
||||
HardMaxCacheSize: 8192,
|
||||
OnRemove: nil,
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testCacheSetup()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestGetWithNoKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
getCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 400 {
|
||||
t.Errorf("want: 400; got: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetWithMissingKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/doesNotExist", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
getCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Errorf("want: 404; got: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/getKey", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// set something.
|
||||
cache.Set("getKey", []byte("123"))
|
||||
|
||||
getCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Errorf("cannot deserialise test response: %s", err)
|
||||
}
|
||||
|
||||
if string(body) != "123" {
|
||||
t.Errorf("want: 123; got: %s.\n\tcan't get existing key getKey.", string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer([]byte("123")))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
putCacheHandler(rr, req)
|
||||
|
||||
testPutKeyResult, err := cache.Get("putKey")
|
||||
if err != nil {
|
||||
t.Errorf("error returning cache entry: %s", err)
|
||||
}
|
||||
|
||||
if string(testPutKeyResult) != "123" {
|
||||
t.Errorf("want: 123; got: %s.\n\tcan't get PUT key putKey.", string(testPutKeyResult))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutEmptyKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
putCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 400 {
|
||||
t.Errorf("want: 400; got: %d.\n\tempty key insertion should return with 400", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteEmptyKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/", bytes.NewBuffer([]byte("123")))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
deleteCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete empty keys.", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteInvalidKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/invalidDeleteKey", bytes.NewBuffer([]byte("123")))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
deleteCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Errorf("want: 404; got: %d.\n\tapparently we're trying to delete invalid keys.", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
req := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testDeleteKey", bytes.NewBuffer([]byte("123")))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
if err := cache.Set("testDeleteKey", []byte("123")); err != nil {
|
||||
t.Errorf("can't set key for testing. %s", err)
|
||||
}
|
||||
|
||||
deleteCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStats(t *testing.T) {
|
||||
t.Parallel()
|
||||
var testStats bigcache.Stats
|
||||
|
||||
req := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// manually enter a key so there are some stats. get it so there's at least 1 hit.
|
||||
if err := cache.Set("incrementStats", []byte("123")); err != nil {
|
||||
t.Errorf("error setting cache value. error %s", err)
|
||||
}
|
||||
// it's okay if this fails, since we'll catch it downstream.
|
||||
if _, err := cache.Get("incrementStats"); err != nil {
|
||||
t.Errorf("can't find incrementStats. error: %s", err)
|
||||
}
|
||||
|
||||
getCacheStatsHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
|
||||
t.Errorf("error decoding cache stats. error: %s", err)
|
||||
}
|
||||
|
||||
if testStats.Hits == 0 {
|
||||
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
|
||||
}
|
||||
}
|
33
vendor/github.com/allegro/bigcache/server/stats_handler.go
generated
vendored
33
vendor/github.com/allegro/bigcache/server/stats_handler.go
generated
vendored
@ -1,33 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// index for stats handle
|
||||
func statsIndexHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
getCacheStatsHandler(w, r)
|
||||
default:
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// returns the cache's statistics.
|
||||
func getCacheStatsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
target, err := json.Marshal(cache.Stats())
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
log.Printf("cannot marshal cache stats. error: %s", err)
|
||||
return
|
||||
}
|
||||
// since we're sending a struct, make it easy for consumers to interface.
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Write(target)
|
||||
return
|
||||
}
|
229
vendor/github.com/allegro/bigcache/shard.go
generated
vendored
229
vendor/github.com/allegro/bigcache/shard.go
generated
vendored
@ -1,229 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/allegro/bigcache/queue"
|
||||
)
|
||||
|
||||
type cacheShard struct {
|
||||
hashmap map[uint64]uint32
|
||||
entries queue.BytesQueue
|
||||
lock sync.RWMutex
|
||||
entryBuffer []byte
|
||||
onRemove func(wrappedEntry []byte)
|
||||
|
||||
isVerbose bool
|
||||
logger Logger
|
||||
clock clock
|
||||
lifeWindow uint64
|
||||
|
||||
stats Stats
|
||||
}
|
||||
|
||||
type onRemoveCallback func(wrappedEntry []byte)
|
||||
|
||||
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.miss()
|
||||
return nil, notFound(key)
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
s.lock.RUnlock()
|
||||
s.miss()
|
||||
return nil, err
|
||||
}
|
||||
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
|
||||
if s.isVerbose {
|
||||
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
s.collision()
|
||||
return nil, notFound(key)
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
s.hit()
|
||||
return readEntry(wrappedEntry), nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||
currentTimestamp := uint64(s.clock.epoch())
|
||||
|
||||
s.lock.Lock()
|
||||
|
||||
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
|
||||
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
|
||||
resetKeyFromEntry(previousEntry)
|
||||
}
|
||||
}
|
||||
|
||||
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||
}
|
||||
|
||||
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
|
||||
|
||||
for {
|
||||
if index, err := s.entries.Push(w); err == nil {
|
||||
s.hashmap[hashedKey] = uint32(index)
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
if s.removeOldestEntry() != nil {
|
||||
s.lock.Unlock()
|
||||
return fmt.Errorf("entry is bigger than max shard size")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return notFound(key)
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return err
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
|
||||
s.lock.Lock()
|
||||
{
|
||||
delete(s.hashmap, hashedKey)
|
||||
s.onRemove(wrappedEntry)
|
||||
resetKeyFromEntry(wrappedEntry)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
|
||||
s.delhit()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > s.lifeWindow {
|
||||
evict()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
|
||||
s.lock.Lock()
|
||||
for {
|
||||
if oldestEntry, err := s.entries.Peek(); err != nil {
|
||||
break
|
||||
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
|
||||
break
|
||||
}
|
||||
}
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
func (s *cacheShard) getOldestEntry() ([]byte, error) {
|
||||
return s.entries.Peek()
|
||||
}
|
||||
|
||||
func (s *cacheShard) getEntry(index int) ([]byte, error) {
|
||||
return s.entries.Get(index)
|
||||
}
|
||||
|
||||
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
|
||||
keys = make([]uint32, len(s.hashmap))
|
||||
|
||||
s.lock.RLock()
|
||||
|
||||
for _, index := range s.hashmap {
|
||||
keys[next] = index
|
||||
next++
|
||||
}
|
||||
|
||||
s.lock.RUnlock()
|
||||
return keys, next
|
||||
}
|
||||
|
||||
func (s *cacheShard) removeOldestEntry() error {
|
||||
oldest, err := s.entries.Pop()
|
||||
if err == nil {
|
||||
hash := readHashFromEntry(oldest)
|
||||
delete(s.hashmap, hash)
|
||||
s.onRemove(oldest)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *cacheShard) reset(config Config) {
|
||||
s.lock.Lock()
|
||||
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
|
||||
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
|
||||
s.entries.Reset()
|
||||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
func (s *cacheShard) len() int {
|
||||
s.lock.RLock()
|
||||
res := len(s.hashmap)
|
||||
s.lock.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *cacheShard) getStats() Stats {
|
||||
var stats = Stats{
|
||||
Hits: atomic.LoadInt64(&s.stats.Hits),
|
||||
Misses: atomic.LoadInt64(&s.stats.Misses),
|
||||
DelHits: atomic.LoadInt64(&s.stats.DelHits),
|
||||
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
|
||||
Collisions: atomic.LoadInt64(&s.stats.Collisions),
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (s *cacheShard) hit() {
|
||||
atomic.AddInt64(&s.stats.Hits, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) miss() {
|
||||
atomic.AddInt64(&s.stats.Misses, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) delhit() {
|
||||
atomic.AddInt64(&s.stats.DelHits, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) delmiss() {
|
||||
atomic.AddInt64(&s.stats.DelMisses, 1)
|
||||
}
|
||||
|
||||
func (s *cacheShard) collision() {
|
||||
atomic.AddInt64(&s.stats.Collisions, 1)
|
||||
}
|
||||
|
||||
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
|
||||
return &cacheShard{
|
||||
hashmap: make(map[uint64]uint32, config.initialShardSize()),
|
||||
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
|
||||
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
|
||||
onRemove: callback,
|
||||
|
||||
isVerbose: config.Verbose,
|
||||
logger: newLogger(config.Logger),
|
||||
clock: clock,
|
||||
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||
}
|
||||
}
|
15
vendor/github.com/allegro/bigcache/stats.go
generated
vendored
15
vendor/github.com/allegro/bigcache/stats.go
generated
vendored
@ -1,15 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
// Stats stores cache statistics
|
||||
type Stats struct {
|
||||
// Hits is a number of successfully found keys
|
||||
Hits int64 `json:"hits"`
|
||||
// Misses is a number of not found keys
|
||||
Misses int64 `json:"misses"`
|
||||
// DelHits is a number of successfully deleted keys
|
||||
DelHits int64 `json:"delete_hits"`
|
||||
// DelMisses is a number of not deleted keys
|
||||
DelMisses int64 `json:"delete_misses"`
|
||||
// Collisions is a number of happened key-collisions
|
||||
Collisions int64 `json:"collisions"`
|
||||
}
|
16
vendor/github.com/allegro/bigcache/utils.go
generated
vendored
16
vendor/github.com/allegro/bigcache/utils.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
package bigcache
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func convertMBToBytes(value int) int {
|
||||
return value * 1024 * 1024
|
||||
}
|
||||
|
||||
func isPowerOfTwo(number int) bool {
|
||||
return (number & (number - 1)) == 0
|
||||
}
|
17
vendor/github.com/aristanetworks/goarista/.travis.yml
generated
vendored
17
vendor/github.com/aristanetworks/goarista/.travis.yml
generated
vendored
@ -1,17 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.10.x
|
||||
- 1.x
|
||||
- master
|
||||
before_install:
|
||||
- go get -v github.com/golang/lint/golint
|
||||
- go get -v -t -d ./...
|
||||
after_success:
|
||||
- make coverdata
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
script:
|
||||
- make -j4 check GOTEST_FLAGS=-v
|
||||
notifications:
|
||||
slack:
|
||||
secure: MO/3LqbyALbi9vAY3pZetp/LfRuKEPAYEUya7XKmTWA3OFHYkTGqJWNosVkFJd6eSKwnc3HP4jlKADEBNVxADHzcA3uMPUQi1mIcNk/Ps1WWMNDv1liE2XOoOmHSHZ/8ksk6TNq83x+d17ZffYq8KAH6iKNKvllO1JzQPgJJdf+cNXQQlg6uPSe+ggMpjqVLkKcHqA4L3/BWo6fNcyvkqaN3uXcEzYPi7Nb2q9tl0ja6ToyZV4H6SinwitZmpedN3RkBcm4fKmGyw5ikzH93ycA5SvWrnXTh1dJvq6DU0FV7iwI6oqPTbAUc3FE5g7aEkK0qVR21s2j+KNaOLnuX10ZGQFwj2r3SW2REHq4j+qqFla/2EmSFZJt3GXYS+plmGCxqCgyjSw6tTi7LaGZ/mWBJEA9/EaXG1NkwlQYx5tdUMeGj77OczjXClynpb2hJ7MM2b32Rnp0JmNaXAh01SmClo+8nDWuksAsIdPtWsbF0/XHmEJiqpu8ojvVXOQIbPt43bjG7PS1t5jaRAU/N1n56SiCGgCSGd3Ui5eX5vmgWdpZMl8NG05G4LFsgmkdphRT5fru0C2PrhNZYRDGWs63XKapBxsvfqGzdHxTtYuaDjHjrI+9w0BC/8kEzSWoPmabQ5ci4wf4DeplcIay4tDMgMSo8pGAf52vrne4rmUo=
|
||||
on_success: change
|
25
vendor/github.com/aristanetworks/goarista/AUTHORS
generated
vendored
25
vendor/github.com/aristanetworks/goarista/AUTHORS
generated
vendored
@ -1,25 +0,0 @@
|
||||
All contributors are required to sign a "Contributor License Agreement" at
|
||||
<TBD>
|
||||
|
||||
The following organizations and people have contributed code to this library.
|
||||
(Please keep both lists sorted alphabetically.)
|
||||
|
||||
|
||||
Arista Networks, Inc.
|
||||
|
||||
|
||||
Benoit Sigoure
|
||||
Fabrice Rabaute
|
||||
|
||||
|
||||
|
||||
The list of individual contributors for code currently in HEAD can be obtained
|
||||
at any time with the following script:
|
||||
|
||||
find . -type f \
|
||||
| while read i; do \
|
||||
git blame -t $i 2>/dev/null; \
|
||||
done \
|
||||
| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \
|
||||
| awk '{a[$0]++; t++} END{for(n in a) print n}' \
|
||||
| sort
|
177
vendor/github.com/aristanetworks/goarista/COPYING
generated
vendored
177
vendor/github.com/aristanetworks/goarista/COPYING
generated
vendored
@ -1,177 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
14
vendor/github.com/aristanetworks/goarista/Dockerfile
generated
vendored
14
vendor/github.com/aristanetworks/goarista/Dockerfile
generated
vendored
@ -1,14 +0,0 @@
|
||||
# Copyright (c) 2016 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the COPYING file.
|
||||
|
||||
# TODO: move this to cmd/ockafka (https://github.com/docker/hub-feedback/issues/292)
|
||||
FROM golang:1.10.3
|
||||
|
||||
RUN mkdir -p /go/src/github.com/aristanetworks/goarista/cmd
|
||||
WORKDIR /go/src/github.com/aristanetworks/goarista
|
||||
COPY ./ .
|
||||
RUN go get -d ./cmd/ockafka/... \
|
||||
&& go install ./cmd/ockafka
|
||||
|
||||
ENTRYPOINT ["/go/bin/ockafka"]
|
59
vendor/github.com/aristanetworks/goarista/Makefile
generated
vendored
59
vendor/github.com/aristanetworks/goarista/Makefile
generated
vendored
@ -1,59 +0,0 @@
|
||||
# Copyright (c) 2015 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the COPYING file.
|
||||
|
||||
GO := go
|
||||
TEST_TIMEOUT := 30s
|
||||
GOTEST_FLAGS :=
|
||||
|
||||
DEFAULT_GOPATH := $${GOPATH%%:*}
|
||||
GOPATH_BIN := $(DEFAULT_GOPATH)/bin
|
||||
GOPATH_PKG := $(DEFAULT_GOPATH)/pkg
|
||||
GOLINT := $(GOPATH_BIN)/golint
|
||||
GOFOLDERS := find . -type d ! -path "./.git/*"
|
||||
|
||||
all: install
|
||||
|
||||
install:
|
||||
$(GO) install ./...
|
||||
|
||||
check: vet test fmtcheck lint
|
||||
|
||||
COVER_PKGS := key test
|
||||
COVER_MODE := count
|
||||
coverdata:
|
||||
echo 'mode: $(COVER_MODE)' >coverage.out
|
||||
for dir in $(COVER_PKGS); do \
|
||||
$(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t ./$$dir || exit; \
|
||||
tail -n +2 cov.out-t >> coverage.out && \
|
||||
rm cov.out-t; \
|
||||
done;
|
||||
|
||||
coverage: coverdata
|
||||
$(GO) tool cover -html=coverage.out
|
||||
rm -f coverage.out
|
||||
|
||||
fmtcheck:
|
||||
errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi
|
||||
find . -name '*.go' ! -name '*.pb.go' -exec ./check_line_len.awk {} +
|
||||
./check_copyright_notice.sh
|
||||
|
||||
vet:
|
||||
$(GO) vet ./...
|
||||
|
||||
lint:
|
||||
lint=`$(GOFOLDERS) | xargs -L 1 $(GOLINT) | fgrep -v .pb.go`; if test -n "$$lint"; then echo "$$lint"; exit 1; fi
|
||||
# The above is ugly, but unfortunately golint doesn't exit 1 when it finds
|
||||
# lint. See https://github.com/golang/lint/issues/65
|
||||
|
||||
test:
|
||||
$(GO) test $(GOTEST_FLAGS) -timeout=$(TEST_TIMEOUT) ./...
|
||||
|
||||
docker:
|
||||
docker build -f cmd/occlient/Dockerfile .
|
||||
|
||||
clean:
|
||||
rm -rf $(GOPATH_PKG)/*/github.com/aristanetworks/goarista
|
||||
$(GO) clean ./...
|
||||
|
||||
.PHONY: all check coverage coverdata docker fmtcheck install lint test vet
|
70
vendor/github.com/aristanetworks/goarista/README.md
generated
vendored
70
vendor/github.com/aristanetworks/goarista/README.md
generated
vendored
@ -1,70 +0,0 @@
|
||||
# Arista Go library [](https://travis-ci.org/aristanetworks/goarista) [](http://codecov.io/github/aristanetworks/goarista?branch=master) [](https://godoc.org/github.com/aristanetworks/goarista) [](https://goreportcard.com/report/github.com/aristanetworks/goarista)
|
||||
|
||||
## areflect
|
||||
|
||||
Helper functions to work with the `reflect` package. Contains
|
||||
`ForceExport()`, which bypasses the check in `reflect.Value` that
|
||||
prevents accessing unexported attributes.
|
||||
|
||||
## monotime
|
||||
|
||||
Provides access to a fast monotonic clock source, to fill in the gap in the
|
||||
[Go standard library, which lacks one](https://github.com/golang/go/issues/12914).
|
||||
Don't use `time.Now()` in code that needs to time things or otherwise assume
|
||||
that time passes at a constant rate, instead use `monotime.Now()`.
|
||||
|
||||
## cmd
|
||||
|
||||
See the [cmd](cmd) directory.
|
||||
|
||||
## dscp
|
||||
|
||||
Provides `ListenTCPWithTOS()`, which is a replacement for `net.ListenTCP()`
|
||||
that allows specifying the ToS (Type of Service), to specify DSCP / ECN /
|
||||
class of service flags to use for incoming connections. Requires `go1.9`.
|
||||
|
||||
## key
|
||||
|
||||
Provides common types used across various Arista projects. The type `key.Key`
|
||||
is used to work around the fact that Go can't let one use a non-hashable type
|
||||
as a key to a `map`, and we sometimes need to use a `map[string]interface{}`
|
||||
(or something containing one) as a key to maps. As a result, we frequently use
|
||||
`map[key.Key]interface{}` instead of just `map[interface{}]interface{}` when we
|
||||
need a generic key-value collection. The type `key.Path` is the representation
|
||||
of a path broken down into individual elements, where each element is a `key.Key`.
|
||||
The type `key.Pointer` represents a pointer to a `key.Path`.
|
||||
|
||||
## path
|
||||
|
||||
Provides functions that can be used to manipulate `key.Path` objects. The type
|
||||
`path.Map` may be used for mapping paths to values. It allows for some fuzzy
|
||||
matching for paths containing `path.Wildcard` keys.
|
||||
|
||||
## lanz
|
||||
A client for [LANZ](https://eos.arista.com/latency-analyzer-lanz-architectures-and-configuration/)
|
||||
streaming servers. It connects to a LANZ streaming server,
|
||||
listens for notifications, decodes them and sends the LANZ protobuf on the
|
||||
provided channel.
|
||||
|
||||
## monitor
|
||||
|
||||
A library to help expose monitoring metrics on top of the
|
||||
[`expvar`](https://golang.org/pkg/expvar/) infrastructure.
|
||||
|
||||
## netns
|
||||
|
||||
`netns.Do(namespace, cb)` provides a handy mechanism to execute the given
|
||||
callback `cb` in the given [network namespace](https://lwn.net/Articles/580893/).
|
||||
|
||||
## influxlib
|
||||
|
||||
This is a influxdb library that provides easy methods of connecting to, writing to,
|
||||
and reading from the service.
|
||||
|
||||
## test
|
||||
|
||||
This is a [Go](http://golang.org/) library to help in writing unit tests.
|
||||
|
||||
## Examples
|
||||
|
||||
TBD
|
38
vendor/github.com/aristanetworks/goarista/areflect/force.go
generated
vendored
38
vendor/github.com/aristanetworks/goarista/areflect/force.go
generated
vendored
@ -1,38 +0,0 @@
|
||||
// Copyright (c) 2014 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// Package areflect provides utilities to help with reflection.
|
||||
package areflect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// ForceExport returns a new reflect.Value that is identical to the one passed
|
||||
// in argument except that it's considered as an exported symbol even if in
|
||||
// reality it isn't.
|
||||
//
|
||||
// The `reflect' package intentionally makes it impossible to access the value
|
||||
// of an unexported attribute. The implementation of reflect.DeepEqual() cheats
|
||||
// as it bypasses this check. Unfortunately, we can't use the same cheat, which
|
||||
// prevents us from re-implementing DeepEqual properly or implementing some other
|
||||
// reflection-based tools. So this is our cheat on top of theirs. It makes
|
||||
// the given reflect.Value appear as if it was exported.
|
||||
//
|
||||
// This function requires go1.6 or newer.
|
||||
func ForceExport(v reflect.Value) reflect.Value {
|
||||
// constants from reflect/value.go
|
||||
const flagStickyRO uintptr = 1 << 5
|
||||
const flagEmbedRO uintptr = 1 << 6 // new in go1.6 (was flagIndir before)
|
||||
const flagRO uintptr = flagStickyRO | flagEmbedRO
|
||||
ptr := unsafe.Pointer(&v)
|
||||
rv := (*struct {
|
||||
typ unsafe.Pointer // a *reflect.rtype (reflect.Type)
|
||||
ptr unsafe.Pointer // The value wrapped by this reflect.Value
|
||||
flag uintptr
|
||||
})(ptr)
|
||||
rv.flag &= ^flagRO // Unset the flag so this value appears to be exported.
|
||||
return v
|
||||
}
|
36
vendor/github.com/aristanetworks/goarista/areflect/force_test.go
generated
vendored
36
vendor/github.com/aristanetworks/goarista/areflect/force_test.go
generated
vendored
@ -1,36 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package areflect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type embedme struct {
|
||||
}
|
||||
|
||||
type somestruct struct {
|
||||
a uint32
|
||||
embedme
|
||||
}
|
||||
|
||||
func TestForcePublic(t *testing.T) {
|
||||
c := somestruct{a: 42}
|
||||
v := reflect.ValueOf(c)
|
||||
// Without the call to forceExport(), the following line would crash with
|
||||
// "panic: reflect.Value.Interface: cannot return value obtained from
|
||||
// unexported field or method".
|
||||
a := ForceExport(v.FieldByName("a")).Interface()
|
||||
if i, ok := a.(uint32); !ok {
|
||||
t.Fatalf("Should have gotten a uint32 but got a %T", a)
|
||||
} else if i != 42 {
|
||||
t.Fatalf("Should have gotten 42 but got a %d", i)
|
||||
}
|
||||
e := ForceExport(v.FieldByName("embedme")).Interface()
|
||||
if _, ok := e.(embedme); !ok {
|
||||
t.Fatalf("Should have gotten a embedme but got a %T", e)
|
||||
}
|
||||
}
|
19
vendor/github.com/aristanetworks/goarista/check_copyright_notice.sh
generated
vendored
19
vendor/github.com/aristanetworks/goarista/check_copyright_notice.sh
generated
vendored
@ -1,19 +0,0 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) 2017 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the COPYING file.
|
||||
|
||||
# egrep that comes with our Linux distro doesn't like \d, so use [0-9]
|
||||
notice='Copyright \(c\) 20[0-9][0-9] Arista Networks, Inc.'
|
||||
files=`git diff-tree --no-commit-id --name-only --diff-filter=ACMR -r HEAD | \
|
||||
egrep '\.(go|proto|py|sh)$' | grep -v '\.pb\.go$'`
|
||||
status=0
|
||||
|
||||
for file in $files; do
|
||||
if ! egrep -q "$notice" $file; then
|
||||
echo "$file: missing or incorrect copyright notice"
|
||||
status=1
|
||||
fi
|
||||
done
|
||||
|
||||
exit $status
|
25
vendor/github.com/aristanetworks/goarista/check_line_len.awk
generated
vendored
25
vendor/github.com/aristanetworks/goarista/check_line_len.awk
generated
vendored
@ -1,25 +0,0 @@
|
||||
#!/usr/bin/awk -f
|
||||
# Copyright (c) 2015 Arista Networks, Inc.
|
||||
# Use of this source code is governed by the Apache License 2.0
|
||||
# that can be found in the COPYING file.
|
||||
|
||||
BEGIN {
|
||||
max = 100;
|
||||
}
|
||||
|
||||
# Expand tabs to 4 spaces.
|
||||
{
|
||||
gsub(/\t/, " ");
|
||||
}
|
||||
|
||||
length() > max {
|
||||
errors++;
|
||||
print FILENAME ":" FNR ": Line too long (" length() "/" max ")";
|
||||
}
|
||||
|
||||
END {
|
||||
if (errors >= 125) {
|
||||
errors = 125;
|
||||
}
|
||||
exit errors;
|
||||
}
|
16
vendor/github.com/aristanetworks/goarista/cmd/README.md
generated
vendored
16
vendor/github.com/aristanetworks/goarista/cmd/README.md
generated
vendored
@ -1,16 +0,0 @@
|
||||
# OpenConfig clients
|
||||
|
||||
The `oc*` commands are clients for the [OpenConfig](http://openconfig.net) gRPC interface.
|
||||
|
||||
# importsort
|
||||
|
||||
`importsort` is a utility for sorting and sectioning import blocks in go code.
|
||||
|
||||
# Running
|
||||
|
||||
After installing [Go](https://golang.org/dl/) and setting the [GOPATH](https://golang.org/doc/code.html#GOPATH) environment variable to the path to your workspace, you can just run:
|
||||
|
||||
```
|
||||
go get github.com/aristanetworks/goarista/cmd/<cmd>
|
||||
$GOPATH/bin/<cmd>
|
||||
```
|
202
vendor/github.com/aristanetworks/goarista/cmd/gnmi/README.md
generated
vendored
202
vendor/github.com/aristanetworks/goarista/cmd/gnmi/README.md
generated
vendored
@ -1,202 +0,0 @@
|
||||
# gnmi
|
||||
|
||||
`gnmi` is a command-line client for interacting with a
|
||||
[gNMI service](https://github.com/openconfig/reference/tree/master/rpc/gnmi).
|
||||
|
||||
# Installation
|
||||
|
||||
After installing [Go](https://golang.org/dl/) run:
|
||||
|
||||
```
|
||||
go get github.com/aristanetworks/goarista/cmd/gnmi
|
||||
```
|
||||
|
||||
This will install the `gnmi` binary in the `bin` directory
|
||||
under [GOPATH](https://golang.org/doc/code.html#GOPATH).
|
||||
|
||||
# Usage
|
||||
|
||||
```
|
||||
$ gnmi [OPTIONS] [OPERATION]
|
||||
```
|
||||
|
||||
When running on the switch in a non-default VRF:
|
||||
|
||||
```
|
||||
$ ip netns exec ns-<VRF> gnmi [OPTIONS] [OPERATION]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
* `-addr [<VRF-NAME>/]ADDR:PORT`
|
||||
Address of the gNMI endpoint (REQUIRED) with VRF name (OPTIONAL)
|
||||
* `-username USERNAME`
|
||||
Username to authenticate with
|
||||
* `-password PASSWORD`
|
||||
Password to authenticate with
|
||||
* `-tls`
|
||||
Enable TLS
|
||||
* `-cafile PATH`
|
||||
Path to server TLS certificate file
|
||||
* `-certfile PATH`
|
||||
Path to client TLS certificate file
|
||||
* `-keyfile PATH`
|
||||
Path to client TLS private key file
|
||||
|
||||
## Operations
|
||||
|
||||
`gnmi` supports the following operations: `capabilites`, `get`,
|
||||
`subscribe`, `update`, `replace`, and `delete`.
|
||||
|
||||
### capabilities
|
||||
|
||||
`capabilities` prints the result of calling the
|
||||
[Capabilities gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery).
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
$ gnmi [OPTIONS] capabilities
|
||||
```
|
||||
|
||||
### get
|
||||
|
||||
`get` requires a path and calls the
|
||||
[Get gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths).
|
||||
|
||||
Example:
|
||||
|
||||
Get all configuration in the default network instance:
|
||||
```
|
||||
$ gnmi [OPTIONS] get '/network-instances/network-instance[name=default]'
|
||||
```
|
||||
|
||||
### subscribe
|
||||
|
||||
`subscribe` requires a path and calls the
|
||||
[Subscribe gNMI RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35-subscribing-to-telemetry-updates).
|
||||
This command will continuously print out results until signalled to
|
||||
exit, for example by typing `Ctrl-C`.
|
||||
|
||||
Example:
|
||||
|
||||
Subscribe to interface counters:
|
||||
```
|
||||
$ gnmi [OPTIONS] subscribe '/interfaces/interface[name=*]/state/counters'
|
||||
```
|
||||
|
||||
### update/replace/delete
|
||||
|
||||
`update`, `replace`, and `delete` are used to
|
||||
[modify the configuration of a gNMI endpoint](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state).
|
||||
All of these operations take a path that must specify a single node
|
||||
element. In other words all list members must be fully-specified.
|
||||
|
||||
`delete` takes a path and will delete that path.
|
||||
|
||||
Example:
|
||||
|
||||
Delete BGP configuration in the default network instance:
|
||||
```
|
||||
$ gnmi [OPTIONS] delete '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/'
|
||||
```
|
||||
|
||||
`update` and `replace` both take a path and a value in JSON
|
||||
format. The JSON data may be provided in a file. See
|
||||
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#344-modes-of-update-replace-versus-update)
|
||||
for documentation on the differences between `update` and `replace`.
|
||||
|
||||
Examples:
|
||||
|
||||
Disable interface Ethernet3/42:
|
||||
```
|
||||
gnmi [OPTIONS] update '/interfaces/interface[name=Ethernet3/42]/config/enabled' 'false'
|
||||
```
|
||||
|
||||
Replace the BGP global configuration:
|
||||
```
|
||||
gnmi [OPTIONS] replace '/network-instances/network-instance[name=default]/protocols/protocol[name=BGP][identifier=BGP]/bgp/global' '{"config":{"as": 1234, "router-id": "1.2.3.4"}}'
|
||||
```
|
||||
|
||||
Note: String values need to be quoted if they look like JSON. For example, setting the login banner to `tor[13]`:
|
||||
```
|
||||
gnmi [OPTIONS] update '/system/config/login-banner '"tor[13]"'
|
||||
```
|
||||
|
||||
#### JSON in a file
|
||||
|
||||
The value argument to `update` and `replace` may be a file. The
|
||||
content of the file is used to make the request.
|
||||
|
||||
Example:
|
||||
|
||||
File `path/to/subintf100.json` contains the following:
|
||||
|
||||
```
|
||||
{
|
||||
"subinterface": [
|
||||
{
|
||||
"config": {
|
||||
"enabled": true,
|
||||
"index": 100
|
||||
},
|
||||
"index": 100
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Add subinterface 100 to interfaces Ethernet4/1/1 and Ethernet4/2/1 in
|
||||
one transaction:
|
||||
|
||||
```
|
||||
gnmi [OPTIONS] update '/interfaces/interface[name=Ethernet4/1/1]/subinterfaces' path/to/subintf100.json \
|
||||
update '/interfaces/interface[name=Ethernet4/2/1]/subinterfaces' path/to/subintf100.json
|
||||
```
|
||||
|
||||
### CLI requests
|
||||
`gnmi` offers the ability to send CLI text inside an `update` or
|
||||
`replace` operation. This is achieved by doing an `update` or
|
||||
`replace` and specifying `"origin=cli"` along with an empty path and a set of configure-mode
|
||||
CLI commands separated by `\n`.
|
||||
|
||||
Example:
|
||||
|
||||
Configure the idle-timeout on SSH connections
|
||||
```
|
||||
gnmi [OPTIONS] update 'cli' 'management ssh
|
||||
idle-timeout 300'
|
||||
```
|
||||
|
||||
### P4 Config
|
||||
`gnmi` offers the ability to send p4 config files inside a `replace` operation.
|
||||
This is achieved by doing a `replace` and specifying `"origin=p4_config"`
|
||||
along with the path of the p4 config file to send.
|
||||
|
||||
Example:
|
||||
|
||||
Send the config.p4 file
|
||||
```
|
||||
gnmi [OPTIONS] replace 'origin=p4_config' 'config.p4'
|
||||
```
|
||||
|
||||
## Paths
|
||||
|
||||
Paths in `gnmi` use a simplified xpath style. Path elements are
|
||||
separated by `/`. Selectors may be used on list to select certain
|
||||
members. Selectors are of the form `[key-leaf=value]`. All members of a
|
||||
list may be selected by not specifying any selectors, or by using a
|
||||
`*` as the value in a selector. The following are equivalent:
|
||||
|
||||
* `/interfaces/interface`
|
||||
* `/interfaces/interface[name=*]`
|
||||
|
||||
All characters, including `/` are allowed inside a selector value. The
|
||||
character `]` must be escaped, for example `[key=[\]]` selects the
|
||||
element in the list whose `key` leaf is value `[]`.
|
||||
|
||||
See more examples of paths in the examples above.
|
||||
|
||||
See
|
||||
[here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths)
|
||||
for more information.
|
166
vendor/github.com/aristanetworks/goarista/cmd/gnmi/main.go
generated
vendored
166
vendor/github.com/aristanetworks/goarista/cmd/gnmi/main.go
generated
vendored
@ -1,166 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
// TODO: Make this more clear
|
||||
var help = `Usage of gnmi:
|
||||
gnmi -addr [<VRF-NAME>/]ADDRESS:PORT [options...]
|
||||
capabilities
|
||||
get PATH+
|
||||
subscribe PATH+
|
||||
((update|replace (origin=ORIGIN) PATH JSON|FILE)|(delete (origin=ORIGIN) PATH))+
|
||||
`
|
||||
|
||||
func usageAndExit(s string) {
|
||||
flag.Usage()
|
||||
if s != "" {
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cfg := &gnmi.Config{}
|
||||
flag.StringVar(&cfg.Addr, "addr", "", "Address of gNMI gRPC server with optional VRF name")
|
||||
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
|
||||
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
|
||||
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
|
||||
|
||||
subscribeOptions := &gnmi.SubscribeOptions{}
|
||||
flag.StringVar(&subscribeOptions.Prefix, "prefix", "", "Subscribe prefix path")
|
||||
flag.BoolVar(&subscribeOptions.UpdatesOnly, "updates_only", false,
|
||||
"Subscribe to updates only (false | true)")
|
||||
flag.StringVar(&subscribeOptions.Mode, "mode", "stream",
|
||||
"Subscribe mode (stream | once | poll)")
|
||||
flag.StringVar(&subscribeOptions.StreamMode, "stream_mode", "target_defined",
|
||||
"Subscribe stream mode, only applies for stream subscriptions "+
|
||||
"(target_defined | on_change | sample)")
|
||||
sampleIntervalStr := flag.String("sample_interval", "0", "Subscribe sample interval, "+
|
||||
"only applies for sample subscriptions (400ms, 2.5s, 1m, etc.)")
|
||||
heartbeatIntervalStr := flag.String("heartbeat_interval", "0", "Subscribe heartbeat "+
|
||||
"interval, only applies for on-change subscriptions (400ms, 2.5s, 1m, etc.)")
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprintln(os.Stderr, help)
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
if cfg.Addr == "" {
|
||||
usageAndExit("error: address not specified")
|
||||
}
|
||||
|
||||
var sampleInterval, heartbeatInterval time.Duration
|
||||
var err error
|
||||
if sampleInterval, err = time.ParseDuration(*sampleIntervalStr); err != nil {
|
||||
usageAndExit(fmt.Sprintf("error: sample interval (%s) invalid", *sampleIntervalStr))
|
||||
}
|
||||
subscribeOptions.SampleInterval = uint64(sampleInterval)
|
||||
if heartbeatInterval, err = time.ParseDuration(*heartbeatIntervalStr); err != nil {
|
||||
usageAndExit(fmt.Sprintf("error: heartbeat interval (%s) invalid", *heartbeatIntervalStr))
|
||||
}
|
||||
subscribeOptions.HeartbeatInterval = uint64(heartbeatInterval)
|
||||
|
||||
args := flag.Args()
|
||||
|
||||
ctx := gnmi.NewContext(context.Background(), cfg)
|
||||
client, err := gnmi.Dial(cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
var setOps []*gnmi.Operation
|
||||
for i := 0; i < len(args); i++ {
|
||||
switch args[i] {
|
||||
case "capabilities":
|
||||
if len(setOps) != 0 {
|
||||
usageAndExit("error: 'capabilities' not allowed after 'merge|replace|delete'")
|
||||
}
|
||||
err := gnmi.Capabilities(ctx, client)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
return
|
||||
case "get":
|
||||
if len(setOps) != 0 {
|
||||
usageAndExit("error: 'get' not allowed after 'merge|replace|delete'")
|
||||
}
|
||||
err := gnmi.Get(ctx, client, gnmi.SplitPaths(args[i+1:]))
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
return
|
||||
case "subscribe":
|
||||
if len(setOps) != 0 {
|
||||
usageAndExit("error: 'subscribe' not allowed after 'merge|replace|delete'")
|
||||
}
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
defer close(errChan)
|
||||
subscribeOptions.Paths = gnmi.SplitPaths(args[i+1:])
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
for {
|
||||
select {
|
||||
case resp, open := <-respChan:
|
||||
if !open {
|
||||
return
|
||||
}
|
||||
if err := gnmi.LogSubscribeResponse(resp); err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
case err := <-errChan:
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
case "update", "replace", "delete":
|
||||
if len(args) == i+1 {
|
||||
usageAndExit("error: missing path")
|
||||
}
|
||||
op := &gnmi.Operation{
|
||||
Type: args[i],
|
||||
}
|
||||
i++
|
||||
if strings.HasPrefix(args[i], "origin=") {
|
||||
op.Origin = strings.TrimPrefix(args[i], "origin=")
|
||||
i++
|
||||
}
|
||||
op.Path = gnmi.SplitPath(args[i])
|
||||
if op.Type != "delete" {
|
||||
if len(args) == i+1 {
|
||||
usageAndExit("error: missing JSON or FILEPATH to data")
|
||||
}
|
||||
i++
|
||||
op.Val = args[i]
|
||||
}
|
||||
setOps = append(setOps, op)
|
||||
default:
|
||||
usageAndExit(fmt.Sprintf("error: unknown operation %q", args[i]))
|
||||
}
|
||||
}
|
||||
if len(setOps) == 0 {
|
||||
usageAndExit("")
|
||||
}
|
||||
err = gnmi.Set(ctx, client, setOps)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
245
vendor/github.com/aristanetworks/goarista/cmd/importsort/main.go
generated
vendored
245
vendor/github.com/aristanetworks/goarista/cmd/importsort/main.go
generated
vendored
@ -1,245 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/vcs"
|
||||
)
|
||||
|
||||
// Implementation taken from "isStandardImportPath" in go's source.
|
||||
func isStdLibPath(path string) bool {
|
||||
i := strings.Index(path, "/")
|
||||
if i < 0 {
|
||||
i = len(path)
|
||||
}
|
||||
elem := path[:i]
|
||||
return !strings.Contains(elem, ".")
|
||||
}
|
||||
|
||||
// sortImports takes in an "import" body and returns it sorted
|
||||
func sortImports(in []byte, sections []string) []byte {
|
||||
type importLine struct {
|
||||
index int // index into inLines
|
||||
path string // import path used for sorting
|
||||
}
|
||||
// imports holds all the import lines, separated by section. The
|
||||
// first section is for stdlib imports, the following sections
|
||||
// hold the user specified sections, the final section is for
|
||||
// everything else.
|
||||
imports := make([][]importLine, len(sections)+2)
|
||||
addImport := func(section, index int, importPath string) {
|
||||
imports[section] = append(imports[section], importLine{index, importPath})
|
||||
}
|
||||
stdlib := 0
|
||||
offset := 1
|
||||
other := len(imports) - 1
|
||||
|
||||
inLines := bytes.Split(in, []byte{'\n'})
|
||||
for i, line := range inLines {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
start := bytes.IndexByte(line, '"')
|
||||
if start == -1 {
|
||||
continue
|
||||
}
|
||||
if comment := bytes.Index(line, []byte("//")); comment > -1 && comment < start {
|
||||
continue
|
||||
}
|
||||
|
||||
start++ // skip '"'
|
||||
end := bytes.IndexByte(line[start:], '"') + start
|
||||
s := string(line[start:end])
|
||||
|
||||
found := false
|
||||
for j, sect := range sections {
|
||||
if strings.HasPrefix(s, sect) && (len(sect) == len(s) || s[len(sect)] == '/') {
|
||||
addImport(j+offset, i, s)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
|
||||
if isStdLibPath(s) {
|
||||
addImport(stdlib, i, s)
|
||||
} else {
|
||||
addImport(other, i, s)
|
||||
}
|
||||
}
|
||||
|
||||
out := make([]byte, 0, len(in)+2)
|
||||
needSeperator := false
|
||||
for _, section := range imports {
|
||||
if len(section) == 0 {
|
||||
continue
|
||||
}
|
||||
if needSeperator {
|
||||
out = append(out, '\n')
|
||||
}
|
||||
sort.Slice(section, func(a, b int) bool {
|
||||
return section[a].path < section[b].path
|
||||
})
|
||||
for _, s := range section {
|
||||
out = append(out, inLines[s.index]...)
|
||||
out = append(out, '\n')
|
||||
}
|
||||
needSeperator = true
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func genFile(in []byte, sections []string) ([]byte, error) {
|
||||
out := make([]byte, 0, len(in)+3) // Add some fudge to avoid re-allocation
|
||||
|
||||
for {
|
||||
const importLine = "\nimport (\n"
|
||||
const importLineLen = len(importLine)
|
||||
importStart := bytes.Index(in, []byte(importLine))
|
||||
if importStart == -1 {
|
||||
break
|
||||
}
|
||||
// Save to `out` everything up to and including "import(\n"
|
||||
out = append(out, in[:importStart+importLineLen]...)
|
||||
in = in[importStart+importLineLen:]
|
||||
importLen := bytes.Index(in, []byte("\n)\n"))
|
||||
if importLen == -1 {
|
||||
return nil, errors.New(`parsing error: missing ")"`)
|
||||
}
|
||||
// Sort body of "import" and write it to `out`
|
||||
out = append(out, sortImports(in[:importLen], sections)...)
|
||||
out = append(out, []byte(")")...)
|
||||
in = in[importLen+2:]
|
||||
}
|
||||
// Write everything leftover to out
|
||||
out = append(out, in...)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// returns true if the file changed
|
||||
func processFile(filename string, writeFile, listDiffFiles bool, sections []string) (bool, error) {
|
||||
in, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
out, err := genFile(in, sections)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
equal := bytes.Equal(in, out)
|
||||
if listDiffFiles {
|
||||
return !equal, nil
|
||||
}
|
||||
if !writeFile {
|
||||
os.Stdout.Write(out)
|
||||
return !equal, nil
|
||||
}
|
||||
|
||||
if equal {
|
||||
return false, nil
|
||||
}
|
||||
temp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer os.RemoveAll(temp.Name())
|
||||
s, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err = temp.Write(out); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := temp.Close(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Chmod(temp.Name(), s.Mode()); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := os.Rename(temp.Name(), filename); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// maps directory to vcsRoot
|
||||
var vcsRootCache = make(map[string]string)
|
||||
|
||||
func vcsRootImportPath(f string) (string, error) {
|
||||
path, err := filepath.Abs(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
if root, ok := vcsRootCache[dir]; ok {
|
||||
return root, nil
|
||||
}
|
||||
gopath := build.Default.GOPATH
|
||||
var root string
|
||||
_, root, err = vcs.FromDir(dir, filepath.Join(gopath, "src"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
vcsRootCache[dir] = root
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
writeFile := flag.Bool("w", false, "write result to file instead of stdout")
|
||||
listDiffFiles := flag.Bool("l", false, "list files whose formatting differs from importsort")
|
||||
var sections multistring
|
||||
flag.Var(§ions, "s", "package `prefix` to define an import section,"+
|
||||
` ex: "cvshub.com/company". May be specified multiple times.`+
|
||||
" If not specified the repository root is used.")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
checkVCSRoot := sections == nil
|
||||
for _, f := range flag.Args() {
|
||||
if checkVCSRoot {
|
||||
root, err := vcsRootImportPath(f)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error determining VCS root for file %q: %s", f, err)
|
||||
continue
|
||||
} else {
|
||||
sections = multistring{root}
|
||||
}
|
||||
}
|
||||
diff, err := processFile(f, *writeFile, *listDiffFiles, sections)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error while proccessing file %q: %s", f, err)
|
||||
continue
|
||||
}
|
||||
if *listDiffFiles && diff {
|
||||
fmt.Println(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type multistring []string
|
||||
|
||||
func (m *multistring) String() string {
|
||||
return strings.Join(*m, ", ")
|
||||
}
|
||||
func (m *multistring) Set(s string) error {
|
||||
*m = append(*m, s)
|
||||
return nil
|
||||
}
|
40
vendor/github.com/aristanetworks/goarista/cmd/importsort/main_test.go
generated
vendored
40
vendor/github.com/aristanetworks/goarista/cmd/importsort/main_test.go
generated
vendored
@ -1,40 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
goldFile = "testdata/test.go.gold"
|
||||
inFile = "testdata/test.go.in"
|
||||
)
|
||||
|
||||
func TestImportSort(t *testing.T) {
|
||||
in, err := ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gold, err := ioutil.ReadFile(goldFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sections := []string{"foobar", "cvshub.com/foobar"}
|
||||
if out, err := genFile(gold, sections); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !bytes.Equal(out, gold) {
|
||||
t.Errorf("importsort on %s file produced a change", goldFile)
|
||||
t.Log(string(out))
|
||||
}
|
||||
if out, err := genFile(in, sections); err != nil {
|
||||
t.Fatal(err)
|
||||
} else if !bytes.Equal(out, gold) {
|
||||
t.Errorf("importsort on %s different than gold", inFile)
|
||||
t.Log(string(out))
|
||||
}
|
||||
}
|
52
vendor/github.com/aristanetworks/goarista/cmd/importsort/testdata/test.go.gold
generated
vendored
52
vendor/github.com/aristanetworks/goarista/cmd/importsort/testdata/test.go.gold
generated
vendored
@ -1,52 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"foobar"
|
||||
"foobar/baz"
|
||||
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"foobar"
|
||||
"foobar/baz"
|
||||
|
||||
"cvshub.com/foobar/import"
|
||||
|
||||
"cvshub.com/other/import"
|
||||
)
|
||||
|
||||
func foobar() {}
|
||||
|
||||
import (
|
||||
z "bytes"
|
||||
"strings"
|
||||
|
||||
"foobar"
|
||||
_ "foobar/baz" // in line comment
|
||||
. "foobar/qux" // in line comment
|
||||
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
import (
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
|
||||
func main() {
|
||||
foobar()
|
||||
}
|
47
vendor/github.com/aristanetworks/goarista/cmd/importsort/testdata/test.go.in
generated
vendored
47
vendor/github.com/aristanetworks/goarista/cmd/importsort/testdata/test.go.in
generated
vendored
@ -1,47 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"foobar"
|
||||
"foobar/baz"
|
||||
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"foobar"
|
||||
"foobar/baz"
|
||||
"cvshub.com/foobar/import"
|
||||
"cvshub.com/other/import"
|
||||
)
|
||||
|
||||
func foobar() {}
|
||||
|
||||
import (
|
||||
// Comment going away
|
||||
"cvshub.com/foobar/import"
|
||||
"strings"
|
||||
_ "foobar/baz" // in line comment
|
||||
"foobar"
|
||||
z "bytes"
|
||||
. "foobar/qux" // in line comment
|
||||
)
|
||||
import (
|
||||
"cvshub.com/foobar/import"
|
||||
"bytes"
|
||||
)
|
||||
import (
|
||||
"cvshub.com/foobar/import"
|
||||
)
|
||||
|
||||
func main() {
|
||||
foobar()
|
||||
}
|
120
vendor/github.com/aristanetworks/goarista/cmd/json2test/main.go
generated
vendored
120
vendor/github.com/aristanetworks/goarista/cmd/json2test/main.go
generated
vendored
@ -1,120 +0,0 @@
|
||||
// Copyright (c) 2018 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// json2test reformats 'go test -json' output as text as if the -json
|
||||
// flag were not passed to go test. It is useful if you want to
|
||||
// analyze go test -json output, but still want a human readable test
|
||||
// log.
|
||||
//
|
||||
// Usage:
|
||||
//
|
||||
// go test -json > out.txt; <analysis program> out.txt; cat out.txt | json2test
|
||||
//
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errTestFailure = errors.New("testfailure")
|
||||
|
||||
func main() {
|
||||
err := writeTestOutput(os.Stdin, os.Stdout)
|
||||
if err == errTestFailure {
|
||||
os.Exit(1)
|
||||
} else if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
type testEvent struct {
|
||||
Time time.Time // encodes as an RFC3339-format string
|
||||
Action string
|
||||
Package string
|
||||
Test string
|
||||
Elapsed float64 // seconds
|
||||
Output string
|
||||
}
|
||||
|
||||
type test struct {
|
||||
pkg string
|
||||
test string
|
||||
}
|
||||
|
||||
type outputBuffer struct {
|
||||
output []string
|
||||
}
|
||||
|
||||
func (o *outputBuffer) push(s string) {
|
||||
o.output = append(o.output, s)
|
||||
}
|
||||
|
||||
type testFailure struct {
|
||||
t test
|
||||
o outputBuffer
|
||||
}
|
||||
|
||||
func writeTestOutput(in io.Reader, out io.Writer) error {
|
||||
testOutputBuffer := map[test]*outputBuffer{}
|
||||
var failures []testFailure
|
||||
d := json.NewDecoder(in)
|
||||
|
||||
buf := bufio.NewWriter(out)
|
||||
defer buf.Flush()
|
||||
for {
|
||||
var e testEvent
|
||||
if err := d.Decode(&e); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
switch e.Action {
|
||||
default:
|
||||
continue
|
||||
case "run":
|
||||
testOutputBuffer[test{pkg: e.Package, test: e.Test}] = new(outputBuffer)
|
||||
case "pass":
|
||||
// Don't hold onto text for passing
|
||||
delete(testOutputBuffer, test{pkg: e.Package, test: e.Test})
|
||||
case "fail":
|
||||
// fail may be for a package, which won't have an entry in
|
||||
// testOutputBuffer because packages don't have a "run"
|
||||
// action.
|
||||
t := test{pkg: e.Package, test: e.Test}
|
||||
if o, ok := testOutputBuffer[t]; ok {
|
||||
f := testFailure{t: t, o: *o}
|
||||
delete(testOutputBuffer, t)
|
||||
failures = append(failures, f)
|
||||
}
|
||||
case "output":
|
||||
buf.WriteString(e.Output)
|
||||
// output may be for a package, which won't have an entry
|
||||
// in testOutputBuffer because packages don't have a "run"
|
||||
// action.
|
||||
if o, ok := testOutputBuffer[test{pkg: e.Package, test: e.Test}]; ok {
|
||||
o.push(e.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(failures) == 0 {
|
||||
return nil
|
||||
}
|
||||
buf.WriteString("\nTest failures:\n")
|
||||
for i, f := range failures {
|
||||
fmt.Fprintf(buf, "[%d] %s.%s\n", i+1, f.t.pkg, f.t.test)
|
||||
for _, s := range f.o.output {
|
||||
buf.WriteString(s)
|
||||
}
|
||||
if i < len(failures)-1 {
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
return errTestFailure
|
||||
}
|
41
vendor/github.com/aristanetworks/goarista/cmd/json2test/main_test.go
generated
vendored
41
vendor/github.com/aristanetworks/goarista/cmd/json2test/main_test.go
generated
vendored
@ -1,41 +0,0 @@
|
||||
// Copyright (c) 2018 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWriteTestOutput(t *testing.T) {
|
||||
input, err := os.Open("testdata/input.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var out bytes.Buffer
|
||||
if err := writeTestOutput(input, &out); err != errTestFailure {
|
||||
t.Error("expected test failure")
|
||||
}
|
||||
|
||||
gold, err := os.Open("testdata/gold.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected, err := ioutil.ReadAll(gold)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(out.Bytes(), expected) {
|
||||
t.Error("output does not match gold.txt")
|
||||
fmt.Println("Expected:")
|
||||
fmt.Println(string(expected))
|
||||
fmt.Println("Got:")
|
||||
fmt.Println(out.String())
|
||||
}
|
||||
}
|
16
vendor/github.com/aristanetworks/goarista/cmd/json2test/testdata/gold.txt
generated
vendored
16
vendor/github.com/aristanetworks/goarista/cmd/json2test/testdata/gold.txt
generated
vendored
@ -1,16 +0,0 @@
|
||||
? pkg/skipped [no test files]
|
||||
=== RUN TestPass
|
||||
--- PASS: TestPass (0.00s)
|
||||
PASS
|
||||
ok pkg/passed 0.013s
|
||||
panic
|
||||
FAIL pkg/panic 600.029s
|
||||
--- FAIL: TestFail (0.18s)
|
||||
|
||||
Test failures:
|
||||
[1] pkg/panic.TestPanic
|
||||
panic
|
||||
FAIL pkg/panic 600.029s
|
||||
|
||||
[2] pkg/failed.TestFail
|
||||
--- FAIL: TestFail (0.18s)
|
17
vendor/github.com/aristanetworks/goarista/cmd/json2test/testdata/input.txt
generated
vendored
17
vendor/github.com/aristanetworks/goarista/cmd/json2test/testdata/input.txt
generated
vendored
@ -1,17 +0,0 @@
|
||||
{"Time":"2018-03-08T10:33:12.002692769-08:00","Action":"output","Package":"pkg/skipped","Output":"? \tpkg/skipped\t[no test files]\n"}
|
||||
{"Time":"2018-03-08T10:33:12.003199228-08:00","Action":"skip","Package":"pkg/skipped","Elapsed":0.001}
|
||||
{"Time":"2018-03-08T10:33:12.343866281-08:00","Action":"run","Package":"pkg/passed","Test":"TestPass"}
|
||||
{"Time":"2018-03-08T10:33:12.34406622-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"=== RUN TestPass\n"}
|
||||
{"Time":"2018-03-08T10:33:12.344139342-08:00","Action":"output","Package":"pkg/passed","Test":"TestPass","Output":"--- PASS: TestPass (0.00s)\n"}
|
||||
{"Time":"2018-03-08T10:33:12.344165231-08:00","Action":"pass","Package":"pkg/passed","Test":"TestPass","Elapsed":0}
|
||||
{"Time":"2018-03-08T10:33:12.344297059-08:00","Action":"output","Package":"pkg/passed","Output":"PASS\n"}
|
||||
{"Time":"2018-03-08T10:33:12.345217622-08:00","Action":"output","Package":"pkg/passed","Output":"ok \tpkg/passed\t0.013s\n"}
|
||||
{"Time":"2018-03-08T10:33:12.34533033-08:00","Action":"pass","Package":"pkg/passed","Elapsed":0.013}
|
||||
{"Time":"2018-03-08T10:33:20.243866281-08:00","Action":"run","Package":"pkg/panic","Test":"TestPanic"}
|
||||
{"Time":"2018-03-08T10:33:20.27231537-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"panic\n"}
|
||||
{"Time":"2018-03-08T10:33:20.272414481-08:00","Action":"output","Package":"pkg/panic","Test":"TestPanic","Output":"FAIL\tpkg/panic\t600.029s\n"}
|
||||
{"Time":"2018-03-08T10:33:20.272440286-08:00","Action":"fail","Package":"pkg/panic","Test":"TestPanic","Elapsed":600.029}
|
||||
{"Time":"2018-03-08T10:33:26.143866281-08:00","Action":"run","Package":"pkg/failed","Test":"TestFail"}
|
||||
{"Time":"2018-03-08T10:33:27.158776469-08:00","Action":"output","Package":"pkg/failed","Test":"TestFail","Output":"--- FAIL: TestFail (0.18s)\n"}
|
||||
{"Time":"2018-03-08T10:33:27.158860934-08:00","Action":"fail","Package":"pkg/failed","Test":"TestFail","Elapsed":0.18}
|
||||
{"Time":"2018-03-08T10:33:27.161302093-08:00","Action":"fail","Package":"pkg/failed","Elapsed":0.204}
|
3
vendor/github.com/aristanetworks/goarista/cmd/occli/README.md
generated
vendored
3
vendor/github.com/aristanetworks/goarista/cmd/occli/README.md
generated
vendored
@ -1,3 +0,0 @@
|
||||
# occli
|
||||
# DEPRECATED
|
||||
Please use [gnmi](../gnmi) instead.
|
29
vendor/github.com/aristanetworks/goarista/cmd/ockafka/README.md
generated
vendored
29
vendor/github.com/aristanetworks/goarista/cmd/ockafka/README.md
generated
vendored
@ -1,29 +0,0 @@
|
||||
# ockafka
|
||||
|
||||
Client for the gRPC OpenConfig service for subscribing to the configuration and
|
||||
state of a network device and feeding the stream to Kafka.
|
||||
|
||||
## Sample usage
|
||||
|
||||
Subscribe to all updates on the Arista device at `10.0.1.2` and stream to a local
|
||||
Kafka instance:
|
||||
|
||||
```
|
||||
ockafka -addrs 10.0.1.2
|
||||
```
|
||||
|
||||
Subscribe to temperature sensors from 2 switches and stream to a remote Kafka instance:
|
||||
|
||||
```
|
||||
ockafka -addrs 10.0.1.2,10.0.1.3 -kafkaaddrs kafka:9092 -subscribe /Sysdb/environment/temperature/status/tempSensor
|
||||
```
|
||||
|
||||
Start in a container:
|
||||
```
|
||||
docker run aristanetworks/ockafka -addrs 10.0.1.1 -kafkaaddrs kafka:9092
|
||||
```
|
||||
|
||||
## Kafka/Elastic integration demo
|
||||
The following video demoes integration with Kafka and Elastic using [this Logstash instance](https://github.com/aristanetworks/docker-logstash):
|
||||
|
||||
[](https://youtu.be/WsyFmxMwXYQ)
|
68
vendor/github.com/aristanetworks/goarista/cmd/ockafka/main.go
generated
vendored
68
vendor/github.com/aristanetworks/goarista/cmd/ockafka/main.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// The occlient tool is a client for the gRPC service for getting and setting the
|
||||
// OpenConfig configuration and state of a network device.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/aristanetworks/goarista/kafka"
|
||||
"github.com/aristanetworks/goarista/kafka/openconfig"
|
||||
"github.com/aristanetworks/goarista/kafka/producer"
|
||||
"github.com/aristanetworks/goarista/openconfig/client"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
var keysFlag = flag.String("kafkakeys", "",
|
||||
"Keys for kafka messages (comma-separated, default: the value of -addrs")
|
||||
|
||||
func newProducer(addresses []string, topic, key, dataset string) (producer.Producer, error) {
|
||||
encodedKey := sarama.StringEncoder(key)
|
||||
p, err := producer.New(openconfig.NewEncoder(topic, encodedKey, dataset), addresses, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to create Kafka brokers: %s", err)
|
||||
}
|
||||
glog.Infof("Connected to Kafka brokers at %s", addresses)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
username, password, subscriptions, grpcAddrs, opts := client.ParseFlags()
|
||||
|
||||
if *keysFlag == "" {
|
||||
*keysFlag = strings.Join(grpcAddrs, ",")
|
||||
}
|
||||
keys := strings.Split(*keysFlag, ",")
|
||||
if len(grpcAddrs) != len(keys) {
|
||||
glog.Fatal("Please provide the same number of addresses and Kafka keys")
|
||||
}
|
||||
addresses := strings.Split(*kafka.Addresses, ",")
|
||||
wg := new(sync.WaitGroup)
|
||||
for i, grpcAddr := range grpcAddrs {
|
||||
key := keys[i]
|
||||
p, err := newProducer(addresses, *kafka.Topic, key, grpcAddr)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
} else {
|
||||
glog.Infof("Initialized Kafka producer for %s", grpcAddr)
|
||||
}
|
||||
publish := func(addr string, message proto.Message) {
|
||||
p.Write(message)
|
||||
}
|
||||
wg.Add(1)
|
||||
p.Start()
|
||||
defer p.Stop()
|
||||
c := client.New(username, password, grpcAddr, opts)
|
||||
go c.Subscribe(wg, subscriptions, publish)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
37
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/README.md
generated
vendored
37
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/README.md
generated
vendored
@ -1,37 +0,0 @@
|
||||
# ocprometheus
|
||||
|
||||
This is a client for the OpenConfig gRPC interface that pushes telemetry to
|
||||
Prometheus. Numerical and boolean (converted to 1 for true and 0 for false) are
|
||||
supported. Non-numerical data isn't supported by Prometheus and is silently
|
||||
dropped. Arrays (even with numeric values) are not yet supported.
|
||||
|
||||
This tool requires a config file to specify how to map the path of the
|
||||
notificatons coming out of the OpenConfig gRPC interface onto Prometheus
|
||||
metric names, and how to extract labels from the path. For example, the
|
||||
following rule, excerpt from `sampleconfig.yml`:
|
||||
|
||||
```yaml
|
||||
metrics:
|
||||
- name: tempSensor
|
||||
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
|
||||
help: Temperature and Maximum Temperature
|
||||
...
|
||||
```
|
||||
|
||||
Applied to an update for the path
|
||||
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature/value`
|
||||
will lead to the metric name `tempSensor` and labels `sensor=TempSensor1` and `type=temperature`.
|
||||
|
||||
Basically, named groups are used to extract (optional) metrics.
|
||||
Unnamed groups will be given labels names like "unnamedLabelX" (where X is the group's position).
|
||||
The timestamps from the notifications are not preserved since Prometheus uses a pull model and
|
||||
doesn't have (yet) support for exporter specified timestamps.
|
||||
Prometheus 2.0 will probably support timestamps.
|
||||
|
||||
## Usage
|
||||
|
||||
See the `-help` output, but here's an example to push all the metrics defined
|
||||
in the sample config file:
|
||||
```
|
||||
ocprometheus -addr <switch-hostname>:6042 -config sampleconfig.json
|
||||
```
|
202
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector.go
generated
vendored
202
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector.go
generated
vendored
@ -1,202 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// A metric source.
|
||||
type source struct {
|
||||
addr string
|
||||
path string
|
||||
}
|
||||
|
||||
// Since the labels are fixed per-path and per-device we can cache them here,
|
||||
// to avoid recomputing them.
|
||||
type labelledMetric struct {
|
||||
metric prometheus.Metric
|
||||
labels []string
|
||||
defaultValue float64
|
||||
stringMetric bool
|
||||
}
|
||||
|
||||
type collector struct {
|
||||
// Protects access to metrics map
|
||||
m sync.Mutex
|
||||
metrics map[source]*labelledMetric
|
||||
|
||||
config *Config
|
||||
}
|
||||
|
||||
func newCollector(config *Config) *collector {
|
||||
return &collector{
|
||||
metrics: make(map[source]*labelledMetric),
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Process a notification and update or create the corresponding metrics.
|
||||
func (c *collector) update(addr string, message proto.Message) {
|
||||
resp, ok := message.(*pb.SubscribeResponse)
|
||||
if !ok {
|
||||
glog.Errorf("Unexpected type of message: %T", message)
|
||||
return
|
||||
}
|
||||
|
||||
notif := resp.GetUpdate()
|
||||
if notif == nil {
|
||||
return
|
||||
}
|
||||
|
||||
device := strings.Split(addr, ":")[0]
|
||||
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
|
||||
// Process deletes first
|
||||
for _, del := range notif.Delete {
|
||||
path := prefix + "/" + strings.Join(del.Element, "/")
|
||||
key := source{addr: device, path: path}
|
||||
c.m.Lock()
|
||||
delete(c.metrics, key)
|
||||
c.m.Unlock()
|
||||
}
|
||||
|
||||
// Process updates next
|
||||
for _, update := range notif.Update {
|
||||
// We only use JSON encoded values
|
||||
if update.Value == nil || update.Value.Type != pb.Encoding_JSON {
|
||||
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
|
||||
continue
|
||||
}
|
||||
|
||||
path := prefix + "/" + strings.Join(update.Path.Element, "/")
|
||||
value, suffix, ok := parseValue(update)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
var strUpdate bool
|
||||
var floatVal float64
|
||||
var strVal string
|
||||
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
strUpdate = false
|
||||
floatVal = v
|
||||
case string:
|
||||
strUpdate = true
|
||||
strVal = v
|
||||
}
|
||||
|
||||
if suffix != "" {
|
||||
path += "/" + suffix
|
||||
}
|
||||
|
||||
src := source{addr: device, path: path}
|
||||
c.m.Lock()
|
||||
// Use the cached labels and descriptor if available
|
||||
if m, ok := c.metrics[src]; ok {
|
||||
if strUpdate {
|
||||
// Skip string updates for non string metrics
|
||||
if !m.stringMetric {
|
||||
c.m.Unlock()
|
||||
continue
|
||||
}
|
||||
// Display a default value and replace the value label with the string value
|
||||
floatVal = m.defaultValue
|
||||
m.labels[len(m.labels)-1] = strVal
|
||||
}
|
||||
|
||||
m.metric = prometheus.MustNewConstMetric(m.metric.Desc(), prometheus.GaugeValue,
|
||||
floatVal, m.labels...)
|
||||
c.m.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
c.m.Unlock()
|
||||
// Get the descriptor and labels for this source
|
||||
metric := c.config.getMetricValues(src)
|
||||
if metric == nil || metric.desc == nil {
|
||||
glog.V(8).Infof("Ignoring unmatched update at %s:%s: %+v", device, path, update.Value)
|
||||
continue
|
||||
}
|
||||
|
||||
if strUpdate {
|
||||
if !metric.stringMetric {
|
||||
// Skip string updates for non string metrics
|
||||
continue
|
||||
}
|
||||
// Display a default value and replace the value label with the string value
|
||||
floatVal = metric.defaultValue
|
||||
metric.labels[len(metric.labels)-1] = strVal
|
||||
}
|
||||
|
||||
// Save the metric and labels in the cache
|
||||
c.m.Lock()
|
||||
lm := prometheus.MustNewConstMetric(metric.desc, prometheus.GaugeValue,
|
||||
floatVal, metric.labels...)
|
||||
c.metrics[src] = &labelledMetric{
|
||||
metric: lm,
|
||||
labels: metric.labels,
|
||||
defaultValue: metric.defaultValue,
|
||||
stringMetric: metric.stringMetric,
|
||||
}
|
||||
c.m.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// ParseValue takes in an update and parses a value and suffix
|
||||
// Returns an interface that contains either a string or a float64 as well as a suffix
|
||||
// Unparseable updates return (0, empty string, false)
|
||||
func parseValue(update *pb.Update) (interface{}, string, bool) {
|
||||
var intf interface{}
|
||||
if err := json.Unmarshal(update.Value.Value, &intf); err != nil {
|
||||
glog.Errorf("Can't parse value in update %v: %v", update, err)
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
switch value := intf.(type) {
|
||||
case float64:
|
||||
return value, "", true
|
||||
case map[string]interface{}:
|
||||
if vIntf, ok := value["value"]; ok {
|
||||
if val, ok := vIntf.(float64); ok {
|
||||
return val, "value", true
|
||||
}
|
||||
}
|
||||
// float64 or string expected as the return value
|
||||
case bool:
|
||||
if value {
|
||||
return float64(1), "", true
|
||||
}
|
||||
return float64(0), "", true
|
||||
case string:
|
||||
return value, "", true
|
||||
default:
|
||||
glog.V(9).Infof("Ignoring update with unexpected type: %T", value)
|
||||
}
|
||||
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
// Describe implements prometheus.Collector interface
|
||||
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
c.config.getAllDescs(ch)
|
||||
}
|
||||
|
||||
// Collect implements prometheus.Collector interface
|
||||
func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
||||
c.m.Lock()
|
||||
for _, m := range c.metrics {
|
||||
ch <- m.metric
|
||||
}
|
||||
c.m.Unlock()
|
||||
}
|
294
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector_test.go
generated
vendored
294
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector_test.go
generated
vendored
@ -1,294 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func makeMetrics(cfg *Config, expValues map[source]float64, notification *pb.Notification,
|
||||
prevMetrics map[source]*labelledMetric) map[source]*labelledMetric {
|
||||
|
||||
expMetrics := map[source]*labelledMetric{}
|
||||
if prevMetrics != nil {
|
||||
expMetrics = prevMetrics
|
||||
}
|
||||
for src, v := range expValues {
|
||||
metric := cfg.getMetricValues(src)
|
||||
if metric == nil || metric.desc == nil || metric.labels == nil {
|
||||
panic("cfg.getMetricValues returned nil")
|
||||
}
|
||||
// Preserve current value of labels
|
||||
labels := metric.labels
|
||||
if _, ok := expMetrics[src]; ok && expMetrics[src] != nil {
|
||||
labels = expMetrics[src].labels
|
||||
}
|
||||
|
||||
// Handle string updates
|
||||
if notification.Update != nil {
|
||||
if update, err := findUpdate(notification, src.path); err == nil {
|
||||
val, _, ok := parseValue(update)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if strVal, ok := val.(string); ok {
|
||||
if !metric.stringMetric {
|
||||
continue
|
||||
}
|
||||
v = metric.defaultValue
|
||||
labels[len(labels)-1] = strVal
|
||||
}
|
||||
}
|
||||
}
|
||||
expMetrics[src] = &labelledMetric{
|
||||
metric: prometheus.MustNewConstMetric(metric.desc, prometheus.GaugeValue, v,
|
||||
labels...),
|
||||
labels: labels,
|
||||
defaultValue: metric.defaultValue,
|
||||
stringMetric: metric.stringMetric,
|
||||
}
|
||||
}
|
||||
// Handle deletion
|
||||
for key := range expMetrics {
|
||||
if _, ok := expValues[key]; !ok {
|
||||
delete(expMetrics, key)
|
||||
}
|
||||
}
|
||||
return expMetrics
|
||||
}
|
||||
|
||||
func findUpdate(notif *pb.Notification, path string) (*pb.Update, error) {
|
||||
prefix := notif.Prefix.Element
|
||||
for _, v := range notif.Update {
|
||||
fullPath := "/" + strings.Join(append(prefix, v.Path.Element...), "/")
|
||||
if strings.Contains(path, fullPath) || path == fullPath {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Failed to find matching update for path %v", path)
|
||||
}
|
||||
|
||||
func makeResponse(notif *pb.Notification) *pb.SubscribeResponse {
|
||||
return &pb.SubscribeResponse{
|
||||
Response: &pb.SubscribeResponse_Update{Update: notif},
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
config := []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
- /Sysdb/bridging/igmpsnooping/forwarding/forwarding/status
|
||||
metrics:
|
||||
- name: fanName
|
||||
path: /Sysdb/environment/cooling/status/fan/name
|
||||
help: Fan Name
|
||||
valuelabel: name
|
||||
defaultvalue: 2.5
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/speed/value
|
||||
help: Fan Speed
|
||||
- name: igmpSnoopingInf
|
||||
path: /Sysdb/igmpsnooping/vlanStatus/(?P<vlan>.+)/ethGroup/(?P<mac>.+)/intf/(?P<intf>.+)
|
||||
help: IGMP snooping status`)
|
||||
cfg, err := parseConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
coll := newCollector(cfg)
|
||||
|
||||
notif := &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("42"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "speed"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("{\"value\": 45}"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"igmpsnooping", "vlanStatus", "2050", "ethGroup",
|
||||
"01:00:5e:01:01:01", "intf", "Cpu"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("true"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "name"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"Fan1.1\""),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expValues := map[source]float64{
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
|
||||
}: 42,
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/environment/cooling/status/fan/speed/value",
|
||||
}: 45,
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu",
|
||||
}: 1,
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/environment/cooling/status/fan/name",
|
||||
}: 2.5,
|
||||
}
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
expMetrics := makeMetrics(cfg, expValues, notif, nil)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
// Update two values, and one path which is not a metric
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("52"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "name"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"Fan2.1\""),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "doesntexist", "status"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("{\"value\": 45}"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
src := source{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
|
||||
}
|
||||
expValues[src] = 52
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
// Same path, different device
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("42"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
src.addr = "10.1.1.2"
|
||||
expValues[src] = 42
|
||||
|
||||
coll.update("10.1.1.2:6042", makeResponse(notif))
|
||||
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
// Delete a path
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Delete: []*pb.Path{
|
||||
{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
},
|
||||
}
|
||||
src.addr = "10.1.1.1"
|
||||
delete(expValues, src)
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
// Non-numeric update to path without value label
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"test\""),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
// Don't make new metrics as it should have no effect
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
}
|
142
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/config.go
generated
vendored
142
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/config.go
generated
vendored
@ -1,142 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Config is the representation of ocprometheus's YAML config file.
|
||||
type Config struct {
|
||||
// Per-device labels.
|
||||
DeviceLabels map[string]prometheus.Labels
|
||||
|
||||
// Prefixes to subscribe to.
|
||||
Subscriptions []string
|
||||
|
||||
// Metrics to collect and how to munge them.
|
||||
Metrics []*MetricDef
|
||||
}
|
||||
|
||||
// MetricDef is the representation of a metric definiton in the config file.
|
||||
type MetricDef struct {
|
||||
// Path is a regexp to match on the Update's full path.
|
||||
// The regexp must be a prefix match.
|
||||
// The regexp can define named capture groups to use as labels.
|
||||
Path string
|
||||
|
||||
// Path compiled as a regexp.
|
||||
re *regexp.Regexp `deepequal:"ignore"`
|
||||
|
||||
// Metric name.
|
||||
Name string
|
||||
|
||||
// Metric help string.
|
||||
Help string
|
||||
|
||||
// Label to store string values
|
||||
ValueLabel string
|
||||
|
||||
// Default value to display for string values
|
||||
DefaultValue float64
|
||||
|
||||
// Does the metric store a string value
|
||||
stringMetric bool
|
||||
|
||||
// This map contains the metric descriptors for this metric for each device.
|
||||
devDesc map[string]*prometheus.Desc
|
||||
|
||||
// This is the default metric descriptor for devices that don't have explicit descs.
|
||||
desc *prometheus.Desc
|
||||
}
|
||||
|
||||
// metricValues contains the values used in updating a metric
|
||||
type metricValues struct {
|
||||
desc *prometheus.Desc
|
||||
labels []string
|
||||
defaultValue float64
|
||||
stringMetric bool
|
||||
}
|
||||
|
||||
// Parses the config and creates the descriptors for each path and device.
|
||||
func parseConfig(cfg []byte) (*Config, error) {
|
||||
config := &Config{
|
||||
DeviceLabels: make(map[string]prometheus.Labels),
|
||||
}
|
||||
if err := yaml.Unmarshal(cfg, config); err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse config: %v", err)
|
||||
}
|
||||
for _, def := range config.Metrics {
|
||||
def.re = regexp.MustCompile(def.Path)
|
||||
// Extract label names
|
||||
reNames := def.re.SubexpNames()[1:]
|
||||
labelNames := make([]string, len(reNames))
|
||||
for i, n := range reNames {
|
||||
labelNames[i] = n
|
||||
if n == "" {
|
||||
labelNames[i] = "unnamedLabel" + strconv.Itoa(i+1)
|
||||
}
|
||||
}
|
||||
if def.ValueLabel != "" {
|
||||
labelNames = append(labelNames, def.ValueLabel)
|
||||
def.stringMetric = true
|
||||
}
|
||||
// Create a default descriptor only if there aren't any per-device labels,
|
||||
// or if it's explicitly declared
|
||||
if len(config.DeviceLabels) == 0 || len(config.DeviceLabels["*"]) > 0 {
|
||||
def.desc = prometheus.NewDesc(def.Name, def.Help, labelNames, config.DeviceLabels["*"])
|
||||
}
|
||||
// Add per-device descriptors
|
||||
def.devDesc = make(map[string]*prometheus.Desc)
|
||||
for device, labels := range config.DeviceLabels {
|
||||
if device == "*" {
|
||||
continue
|
||||
}
|
||||
def.devDesc[device] = prometheus.NewDesc(def.Name, def.Help, labelNames, labels)
|
||||
}
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Returns a struct containing the descriptor corresponding to the device and path, labels
|
||||
// extracted from the path, the default value for the metric and if it accepts string values.
|
||||
// If the device and path doesn't match any metrics, returns nil.
|
||||
func (c *Config) getMetricValues(s source) *metricValues {
|
||||
for _, def := range c.Metrics {
|
||||
if groups := def.re.FindStringSubmatch(s.path); groups != nil {
|
||||
if def.ValueLabel != "" {
|
||||
groups = append(groups, def.ValueLabel)
|
||||
}
|
||||
desc, ok := def.devDesc[s.addr]
|
||||
if !ok {
|
||||
desc = def.desc
|
||||
}
|
||||
return &metricValues{desc: desc, labels: groups[1:], defaultValue: def.DefaultValue,
|
||||
stringMetric: def.stringMetric}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sends all the descriptors to the channel.
|
||||
func (c *Config) getAllDescs(ch chan<- *prometheus.Desc) {
|
||||
for _, def := range c.Metrics {
|
||||
// Default descriptor might not be present
|
||||
if def.desc != nil {
|
||||
ch <- def.desc
|
||||
}
|
||||
|
||||
for _, desc := range def.devDesc {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
}
|
449
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/config_test.go
generated
vendored
449
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/config_test.go
generated
vendored
@ -1,449 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
tCases := []struct {
|
||||
input []byte
|
||||
config Config
|
||||
}{
|
||||
{
|
||||
input: []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: fanName
|
||||
path: /Sysdb/environment/cooling/status/fan/name
|
||||
help: Fan Name
|
||||
valuelabel: name
|
||||
defaultvalue: 25
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
config: Config{
|
||||
DeviceLabels: map[string]prometheus.Labels{
|
||||
"10.1.1.1": {
|
||||
"lab1": "val1",
|
||||
"lab2": "val2",
|
||||
},
|
||||
"*": {
|
||||
"lab1": "val3",
|
||||
"lab2": "val4",
|
||||
},
|
||||
},
|
||||
Subscriptions: []string{
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
},
|
||||
Metrics: []*MetricDef{
|
||||
{
|
||||
Path: "/Sysdb/environment/cooling/status/fan/name",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/environment/cooling/status/fan/name"),
|
||||
Name: "fanName",
|
||||
Help: "Fan Name",
|
||||
ValueLabel: "name",
|
||||
DefaultValue: 25,
|
||||
stringMetric: true,
|
||||
devDesc: map[string]*prometheus.Desc{
|
||||
"10.1.1.1": prometheus.NewDesc("fanName",
|
||||
"Fan Name",
|
||||
[]string{"name"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
desc: prometheus.NewDesc("fanName",
|
||||
"Fan Name",
|
||||
[]string{"name"},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
},
|
||||
{
|
||||
Path: "/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
|
||||
Name: "intfCounter",
|
||||
Help: "Per-Interface Bytes/Errors/Discards Counters",
|
||||
devDesc: map[string]*prometheus.Desc{
|
||||
"10.1.1.1": prometheus.NewDesc("intfCounter",
|
||||
"Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
desc: prometheus.NewDesc("intfCounter",
|
||||
"Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
},
|
||||
{
|
||||
Path: "/Sysdb/environment/cooling/fan/speed/value",
|
||||
re: regexp.MustCompile("/Sysdb/environment/cooling/fan/speed/value"),
|
||||
Name: "fanSpeed",
|
||||
Help: "Fan Speed",
|
||||
devDesc: map[string]*prometheus.Desc{
|
||||
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: []byte(`
|
||||
devicelabels:
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
config: Config{
|
||||
DeviceLabels: map[string]prometheus.Labels{
|
||||
"*": {
|
||||
"lab1": "val3",
|
||||
"lab2": "val4",
|
||||
},
|
||||
},
|
||||
Subscriptions: []string{
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
},
|
||||
Metrics: []*MetricDef{
|
||||
{
|
||||
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
|
||||
Name: "intfCounter",
|
||||
Help: "Per-Interface Bytes/Errors/Discards Counters",
|
||||
devDesc: map[string]*prometheus.Desc{},
|
||||
desc: prometheus.NewDesc("intfCounter",
|
||||
"Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"intf"},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
},
|
||||
{
|
||||
Path: "/Sysdb/environment/cooling/fan/speed/value",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/environment/cooling/fan/speed/value"),
|
||||
Name: "fanSpeed",
|
||||
Help: "Fan Speed",
|
||||
devDesc: map[string]*prometheus.Desc{},
|
||||
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
config: Config{
|
||||
DeviceLabels: map[string]prometheus.Labels{
|
||||
"10.1.1.1": {
|
||||
"lab1": "val1",
|
||||
"lab2": "val2",
|
||||
},
|
||||
},
|
||||
Subscriptions: []string{
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
},
|
||||
Metrics: []*MetricDef{
|
||||
{
|
||||
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
|
||||
Name: "intfCounter",
|
||||
Help: "Per-Interface Bytes/Errors/Discards Counters",
|
||||
devDesc: map[string]*prometheus.Desc{
|
||||
"10.1.1.1": prometheus.NewDesc("intfCounter",
|
||||
"Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"intf"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "/Sysdb/environment/cooling/fan/speed/value",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/environment/cooling/fan/speed/value"),
|
||||
Name: "fanSpeed",
|
||||
Help: "Fan Speed",
|
||||
devDesc: map[string]*prometheus.Desc{
|
||||
"10.1.1.1": prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
input: []byte(`
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
config: Config{
|
||||
DeviceLabels: map[string]prometheus.Labels{},
|
||||
Subscriptions: []string{
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
},
|
||||
Metrics: []*MetricDef{
|
||||
{
|
||||
Path: "/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter"),
|
||||
Name: "intfCounter",
|
||||
Help: "Per-Interface Bytes/Errors/Discards Counters",
|
||||
devDesc: map[string]*prometheus.Desc{},
|
||||
desc: prometheus.NewDesc("intfCounter",
|
||||
"Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"intf"}, prometheus.Labels{}),
|
||||
},
|
||||
{
|
||||
Path: "/Sysdb/environment/cooling/fan/speed/value",
|
||||
re: regexp.MustCompile(
|
||||
"/Sysdb/environment/cooling/fan/speed/value"),
|
||||
Name: "fanSpeed",
|
||||
Help: "Fan Speed",
|
||||
devDesc: map[string]*prometheus.Desc{},
|
||||
desc: prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range tCases {
|
||||
cfg, err := parseConfig(c.input)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error in case %d: %v", i+1, err)
|
||||
continue
|
||||
}
|
||||
if !test.DeepEqual(*cfg, c.config) {
|
||||
t.Errorf("Test case %d: mismatch %v", i+1, test.Diff(*cfg, c.config))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMetricValues(t *testing.T) {
|
||||
config := []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/speed/value
|
||||
help: Fan Speed`)
|
||||
cfg, err := parseConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
tCases := []struct {
|
||||
src source
|
||||
desc *prometheus.Desc
|
||||
labels []string
|
||||
}{
|
||||
{
|
||||
src: source{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
|
||||
},
|
||||
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
labels: []string{"lag", "Ethernet1"},
|
||||
},
|
||||
{
|
||||
src: source{
|
||||
addr: "10.2.2.2",
|
||||
path: "/Sysdb/lag/intfCounterDir/Ethernet1/intfCounter",
|
||||
},
|
||||
desc: prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
labels: []string{"lag", "Ethernet1"},
|
||||
},
|
||||
{
|
||||
src: source{
|
||||
addr: "10.2.2.2",
|
||||
path: "/Sysdb/environment/cooling/status/fan/speed/value",
|
||||
},
|
||||
desc: prometheus.NewDesc("fanSpeed", "Fan Speed",
|
||||
[]string{},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
labels: []string{},
|
||||
},
|
||||
{
|
||||
src: source{
|
||||
addr: "10.2.2.2",
|
||||
path: "/Sysdb/environment/nonexistent",
|
||||
},
|
||||
desc: nil,
|
||||
labels: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range tCases {
|
||||
metric := cfg.getMetricValues(c.src)
|
||||
if metric == nil {
|
||||
// Avoids error from trying to access metric.desc when metric is nil
|
||||
metric = &metricValues{}
|
||||
}
|
||||
if !test.DeepEqual(metric.desc, c.desc) {
|
||||
t.Errorf("Test case %d: desc mismatch %v", i+1, test.Diff(metric.desc, c.desc))
|
||||
}
|
||||
if !test.DeepEqual(metric.labels, c.labels) {
|
||||
t.Errorf("Test case %d: labels mismatch %v", i+1, test.Diff(metric.labels, c.labels))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAllDescs(t *testing.T) {
|
||||
tCases := []struct {
|
||||
config []byte
|
||||
descs []*prometheus.Desc
|
||||
}{
|
||||
{
|
||||
config: []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
descs: []*prometheus.Desc{
|
||||
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val3", "lab2": "val4"}),
|
||||
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
config: []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
metrics:
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/speed/value
|
||||
help: Fan Speed`),
|
||||
descs: []*prometheus.Desc{
|
||||
prometheus.NewDesc("intfCounter", "Per-Interface Bytes/Errors/Discards Counters",
|
||||
[]string{"unnamedLabel1", "intf"},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
prometheus.NewDesc("fanSpeed", "Fan Speed", []string{},
|
||||
prometheus.Labels{"lab1": "val1", "lab2": "val2"}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range tCases {
|
||||
cfg, err := parseConfig(c.config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
ch := make(chan *prometheus.Desc, 10)
|
||||
cfg.getAllDescs(ch)
|
||||
j := 0
|
||||
for d := range ch {
|
||||
if !test.DeepEqual(c.descs[j], d) {
|
||||
t.Errorf("Test case %d: desc %d mismatch %v", i+1, j+1, test.Diff(c.descs[j], d))
|
||||
}
|
||||
j++
|
||||
if j == len(c.descs) {
|
||||
break
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ch:
|
||||
t.Errorf("Test case %d: too many descs", i+1)
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
93
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/main.go
generated
vendored
93
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/main.go
generated
vendored
@ -1,93 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// The ocprometheus implements a Prometheus exporter for OpenConfig telemetry data.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// gNMI options
|
||||
gNMIcfg := &gnmi.Config{}
|
||||
flag.StringVar(&gNMIcfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
|
||||
flag.StringVar(&gNMIcfg.CAFile, "cafile", "", "Path to server TLS certificate file")
|
||||
flag.StringVar(&gNMIcfg.CertFile, "certfile", "", "Path to client TLS certificate file")
|
||||
flag.StringVar(&gNMIcfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&gNMIcfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.StringVar(&gNMIcfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.BoolVar(&gNMIcfg.TLS, "tls", false, "Enable TLS")
|
||||
subscribePaths := flag.String("subscribe", "/", "Comma-separated list of paths to subscribe to")
|
||||
|
||||
// program options
|
||||
listenaddr := flag.String("listenaddr", ":8080", "Address on which to expose the metrics")
|
||||
url := flag.String("url", "/metrics", "URL where to expose the metrics")
|
||||
configFlag := flag.String("config", "",
|
||||
"Config to turn OpenConfig telemetry into Prometheus metrics")
|
||||
|
||||
flag.Parse()
|
||||
subscriptions := strings.Split(*subscribePaths, ",")
|
||||
if *configFlag == "" {
|
||||
glog.Fatal("You need specify a config file using -config flag")
|
||||
}
|
||||
cfg, err := ioutil.ReadFile(*configFlag)
|
||||
if err != nil {
|
||||
glog.Fatalf("Can't read config file %q: %v", *configFlag, err)
|
||||
}
|
||||
config, err := parseConfig(cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
// Ignore the default "subscribe-to-everything" subscription of the
|
||||
// -subscribe flag.
|
||||
if subscriptions[0] == "/" {
|
||||
subscriptions = subscriptions[1:]
|
||||
}
|
||||
// Add the subscriptions from the config file.
|
||||
subscriptions = append(subscriptions, config.Subscriptions...)
|
||||
|
||||
coll := newCollector(config)
|
||||
prometheus.MustRegister(coll)
|
||||
ctx := gnmi.NewContext(context.Background(), gNMIcfg)
|
||||
client, err := gnmi.Dial(gNMIcfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
subscribeOptions := &gnmi.SubscribeOptions{
|
||||
Mode: "stream",
|
||||
StreamMode: "target_defined",
|
||||
Paths: gnmi.SplitPaths(subscriptions),
|
||||
}
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
go handleSubscription(respChan, errChan, coll, gNMIcfg.Addr)
|
||||
http.Handle(*url, promhttp.Handler())
|
||||
glog.Fatal(http.ListenAndServe(*listenaddr, nil))
|
||||
}
|
||||
|
||||
func handleSubscription(respChan chan *pb.SubscribeResponse,
|
||||
errChan chan error, coll *collector, addr string) {
|
||||
for {
|
||||
select {
|
||||
case resp := <-respChan:
|
||||
coll.update(addr, resp)
|
||||
case err := <-errChan:
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
80
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/sampleconfig_above_4.20.yml
generated
vendored
80
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/sampleconfig_above_4.20.yml
generated
vendored
@ -1,80 +0,0 @@
|
||||
# Per-device labels. Optional
|
||||
# Exactly the same set of labels must be specified for each device.
|
||||
# If device address is *, the labels apply to all devices not listed explicitly.
|
||||
# If any explicit device if listed below, then you need to specify all devices you're subscribed to,
|
||||
# or have a wildcard entry. Otherwise, updates from non-listed devices will be ignored.
|
||||
#deviceLabels:
|
||||
# 10.1.1.1:
|
||||
# lab1: val1
|
||||
# lab2: val2
|
||||
# '*':
|
||||
# lab1: val3
|
||||
# lab2: val4
|
||||
|
||||
# Subscriptions to OpenConfig paths.
|
||||
subscriptions:
|
||||
- /Smash/counters/ethIntf
|
||||
- /Smash/interface/counter/lag/current/counter
|
||||
- /Sysdb/environment/archer/cooling/status
|
||||
- /Sysdb/environment/archer/power/status
|
||||
- /Sysdb/environment/archer/temperature/status
|
||||
- /Sysdb/hardware/archer/xcvr/status
|
||||
- /Sysdb/interface/config/eth
|
||||
|
||||
# Prometheus metrics configuration.
|
||||
# If you use named capture groups in the path, they will be extracted into labels with the same name.
|
||||
# All fields are mandatory.
|
||||
metrics:
|
||||
- name: interfaceDescription
|
||||
path: /Sysdb/interface/config/eth/phy/slice/1/intfConfig/(?P<interface>Ethernet.)/description
|
||||
help: Description
|
||||
valuelabel: description
|
||||
defaultvalue: 15
|
||||
- name: intfCounter
|
||||
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: intfLagCounter
|
||||
path: /Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)
|
||||
help: Per-Lag Bytes/Errors/Discards Counters
|
||||
- name: intfPktCounter
|
||||
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)
|
||||
help: Per-Interface Unicast/Multicast/Broadcast Packer Counters
|
||||
- name: intfLagPktCounter
|
||||
path: /Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)
|
||||
help: Per-Lag Unicast/Multicast/Broadcast Packer Counters
|
||||
- name: intfPfcClassCounter
|
||||
path: /Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)
|
||||
help: Per-Interface Input/Output PFC Frames Counters
|
||||
- name: tempSensor
|
||||
path: /Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/((?:maxT|t)emperature)
|
||||
help: Temperature and Maximum Temperature
|
||||
- name: tempSensorAlert
|
||||
path: /Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/(alertRaisedCount)
|
||||
help: Temperature Alerts Counter
|
||||
- name: currentSensor
|
||||
path: /Sysdb/(environment)/archer/power/status/currentSensor/(?P<sensor>.+)/(current)
|
||||
help: Current Levels
|
||||
- name: powerSensor
|
||||
path: /Sysdb/(environment)/archer/(power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power
|
||||
help: Input/Output Power Levels
|
||||
- name: voltageSensor
|
||||
path: /Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(voltage)
|
||||
help: Voltage Levels
|
||||
- name: railCurrentSensor
|
||||
path: /Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(current)
|
||||
help: Rail Current Levels
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/(environment)/archer/(cooling)/status/(?P<fan>.+)/speed
|
||||
help: Fan Speed
|
||||
- name: qsfpModularRxPower
|
||||
path: /Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)
|
||||
help: qsfpModularRxPower
|
||||
- name: qsfpFixedRxPower
|
||||
path: /Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)
|
||||
help: qsfpFixedRxPower
|
||||
- name: sfpModularTemperature
|
||||
path: /Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/lastDomUpdateTime/(temperature)
|
||||
help: sfpModularTemperature
|
||||
- name: sfpFixedTemperature
|
||||
path: /Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/lastDomUpdateTime/(temperature)
|
||||
help: sfpFixedTemperature
|
62
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/sampleconfig_below_4.20.yml
generated
vendored
62
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/sampleconfig_below_4.20.yml
generated
vendored
@ -1,62 +0,0 @@
|
||||
# Per-device labels. Optional
|
||||
# Exactly the same set of labels must be specified for each device.
|
||||
# If device address is *, the labels apply to all devices not listed explicitly.
|
||||
# If any explicit device if listed below, then you need to specify all devices you're subscribed to,
|
||||
# or have a wildcard entry. Otherwise, updates from non-listed devices will be ignored.
|
||||
deviceLabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
|
||||
# Subscriptions to OpenConfig paths.
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
- /Sysdb/environment/temperature/status
|
||||
- /Sysdb/interface/counter/eth/lag
|
||||
- /Sysdb/interface/counter/eth/slice/phy
|
||||
- /Sysdb/interface/config
|
||||
- /Sysdb/interface/config/eth/phy/slice/1/intfConfig
|
||||
|
||||
# Prometheus metrics configuration.
|
||||
# If you use named capture groups in the path, they will be extracted into labels with the same name.
|
||||
# All fields are mandatory.
|
||||
metrics:
|
||||
- name: interfaceDescription
|
||||
path: Sysdb/interface/config/eth/phy/slice/1/intfConfig/(?P<interface>Ethernet.)/description
|
||||
help: Description
|
||||
valuelabel: description
|
||||
defaultvalue: 15
|
||||
- name: intfCounter
|
||||
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(Octets|Errors|Discards))
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: intfPktCounter
|
||||
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))Pkt
|
||||
help: Per-Interface Unicast/Multicast/Broadcast Packer Counters
|
||||
- name: intfPfcClassCounter
|
||||
path: /Sysdb/interface/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))PfcClassFrames
|
||||
help: Per-Interface Input/Output PFC Frames Counters
|
||||
- name: tempSensor
|
||||
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
|
||||
help: Temperature and Maximum Temperature
|
||||
- name: tempSensorAlert
|
||||
path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/alertRaisedCount
|
||||
help: Temperature Alerts Counter
|
||||
- name: currentSensor
|
||||
path: /Sysdb/environment/power/status/currentSensor/(?P<sensor>.+)/current/value
|
||||
help: Current Levels
|
||||
- name: powerSensor
|
||||
path: /Sysdb/environment/power/status/powerSupply/(?P<sensor>.+)/(?P<direction>(input|output))Power/value
|
||||
help: Input/Output Power Levels
|
||||
- name: voltageSensor
|
||||
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/voltage/value
|
||||
help: Voltage Levels
|
||||
- name: railCurrentSensor
|
||||
path: /Sysdb/environment/power/status/voltageSensor/(?P<sensor>.+)/current/value
|
||||
help: Rail Current Levels
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/(?P<fan>.+)/speed/value
|
||||
help: Fan Speed
|
21
vendor/github.com/aristanetworks/goarista/cmd/ocredis/README.md
generated
vendored
21
vendor/github.com/aristanetworks/goarista/cmd/ocredis/README.md
generated
vendored
@ -1,21 +0,0 @@
|
||||
# ocredis
|
||||
|
||||
This is a client for the OpenConfig gRPC interface that publishes data to
|
||||
Redis. Values are stored in JSON. Every update is pushed to Redis twice:
|
||||
|
||||
1. as a [hash map](http://redis.io/topics/data-types-intro#hashes) update,
|
||||
where the path in Redis is the path to the entity or collection (aka
|
||||
container or list, in YANG speak) and the keys of the hash are the
|
||||
attributes (leaf names, in YANG speak).
|
||||
2. as a [`PUBLISH`](http://redis.io/commands/publish) command sent onto
|
||||
the path to the entity or collection, so that consumers can receive
|
||||
updates in a streaming fashion from Redis.
|
||||
|
||||
## Usage
|
||||
|
||||
See the `-help` output, but here's an example to push all the temperature
|
||||
sensors into Redis. You can also not pass any `-subscribe` flag to push
|
||||
_everything_ into Redis.
|
||||
```
|
||||
ocredis -subscribe /Sysdb/environment/temperature -addr <switch-hostname>:6042 -redis <redis-hostname>:6379
|
||||
```
|
208
vendor/github.com/aristanetworks/goarista/cmd/ocredis/main.go
generated
vendored
208
vendor/github.com/aristanetworks/goarista/cmd/ocredis/main.go
generated
vendored
@ -1,208 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// The ocredis tool is a client for the OpenConfig gRPC interface that
|
||||
// subscribes to state and pushes it to Redis, using Redis' support for hash
|
||||
// maps and for publishing events that can be subscribed to.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"strings"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
redis "gopkg.in/redis.v4"
|
||||
)
|
||||
|
||||
var clusterMode = flag.Bool("cluster", false, "Whether the redis server is a cluster")
|
||||
|
||||
var redisFlag = flag.String("redis", "",
|
||||
"Comma separated list of Redis servers to push updates to")
|
||||
|
||||
var redisPassword = flag.String("redispass", "", "Password of redis server/cluster")
|
||||
|
||||
// baseClient allows us to represent both a redis.Client and redis.ClusterClient.
|
||||
type baseClient interface {
|
||||
Close() error
|
||||
ClusterInfo() *redis.StringCmd
|
||||
HDel(string, ...string) *redis.IntCmd
|
||||
HMSet(string, map[string]string) *redis.StatusCmd
|
||||
Ping() *redis.StatusCmd
|
||||
Pipelined(func(*redis.Pipeline) error) ([]redis.Cmder, error)
|
||||
Publish(string, string) *redis.IntCmd
|
||||
}
|
||||
|
||||
var client baseClient
|
||||
|
||||
func main() {
|
||||
|
||||
// gNMI options
|
||||
cfg := &gnmi.Config{}
|
||||
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
|
||||
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
|
||||
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
|
||||
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
|
||||
subscribePaths := flag.String("subscribe", "/", "Comma-separated list of paths to subscribe to")
|
||||
flag.Parse()
|
||||
if *redisFlag == "" {
|
||||
glog.Fatal("Specify the address of the Redis server to write to with -redis")
|
||||
}
|
||||
|
||||
subscriptions := strings.Split(*subscribePaths, ",")
|
||||
redisAddrs := strings.Split(*redisFlag, ",")
|
||||
if !*clusterMode && len(redisAddrs) > 1 {
|
||||
glog.Fatal("Please pass only 1 redis address in noncluster mode or enable cluster mode")
|
||||
}
|
||||
|
||||
if *clusterMode {
|
||||
client = redis.NewClusterClient(&redis.ClusterOptions{
|
||||
Addrs: redisAddrs,
|
||||
Password: *redisPassword,
|
||||
})
|
||||
} else {
|
||||
client = redis.NewClient(&redis.Options{
|
||||
Addr: *redisFlag,
|
||||
Password: *redisPassword,
|
||||
})
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// TODO: Figure out ways to handle being in the wrong mode:
|
||||
// Connecting to cluster in non cluster mode - we get a MOVED error on the first HMSET
|
||||
// Connecting to a noncluster in cluster mode - we get stuck forever
|
||||
_, err := client.Ping().Result()
|
||||
if err != nil {
|
||||
glog.Fatal("Failed to connect to client: ", err)
|
||||
}
|
||||
ctx := gnmi.NewContext(context.Background(), cfg)
|
||||
client, err := gnmi.Dial(cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
subscribeOptions := &gnmi.SubscribeOptions{
|
||||
Mode: "stream",
|
||||
StreamMode: "target_defined",
|
||||
Paths: gnmi.SplitPaths(subscriptions),
|
||||
}
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
for {
|
||||
select {
|
||||
case resp := <-respChan:
|
||||
bufferToRedis(cfg.Addr, resp.GetUpdate())
|
||||
case err := <-errChan:
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type redisData struct {
|
||||
key string
|
||||
hmset map[string]string
|
||||
hdel []string
|
||||
pub map[string]interface{}
|
||||
}
|
||||
|
||||
func bufferToRedis(addr string, notif *pb.Notification) {
|
||||
if notif == nil {
|
||||
// possible that this should be ignored silently
|
||||
glog.Error("Nil notification ignored")
|
||||
return
|
||||
}
|
||||
path := addr + "/" + joinPath(notif.Prefix)
|
||||
data := &redisData{key: path}
|
||||
|
||||
if len(notif.Update) != 0 {
|
||||
hmset := make(map[string]string, len(notif.Update))
|
||||
|
||||
// Updates to publish on the pub/sub.
|
||||
pub := make(map[string]interface{}, len(notif.Update))
|
||||
for _, update := range notif.Update {
|
||||
key := joinPath(update.Path)
|
||||
value := convertUpdate(update)
|
||||
pub[key] = value
|
||||
marshaledValue, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to JSON marshal update %#v", update)
|
||||
}
|
||||
hmset[key] = string(marshaledValue)
|
||||
}
|
||||
data.hmset = hmset
|
||||
data.pub = pub
|
||||
}
|
||||
|
||||
if len(notif.Delete) != 0 {
|
||||
hdel := make([]string, len(notif.Delete))
|
||||
for i, del := range notif.Delete {
|
||||
hdel[i] = joinPath(del)
|
||||
}
|
||||
data.hdel = hdel
|
||||
}
|
||||
pushToRedis(data)
|
||||
}
|
||||
|
||||
func pushToRedis(data *redisData) {
|
||||
_, err := client.Pipelined(func(pipe *redis.Pipeline) error {
|
||||
if data.hmset != nil {
|
||||
if reply := client.HMSet(data.key, data.hmset); reply.Err() != nil {
|
||||
glog.Fatal("Redis HMSET error: ", reply.Err())
|
||||
}
|
||||
redisPublish(data.key, "updates", data.pub)
|
||||
}
|
||||
if data.hdel != nil {
|
||||
if reply := client.HDel(data.key, data.hdel...); reply.Err() != nil {
|
||||
glog.Fatal("Redis HDEL error: ", reply.Err())
|
||||
}
|
||||
redisPublish(data.key, "deletes", data.hdel)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
glog.Fatal("Failed to send Pipelined commands: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
func redisPublish(path, kind string, payload interface{}) {
|
||||
js, err := json.Marshal(map[string]interface{}{
|
||||
"kind": kind,
|
||||
"payload": payload,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Fatalf("JSON error: %s", err)
|
||||
}
|
||||
if reply := client.Publish(path, string(js)); reply.Err() != nil {
|
||||
glog.Fatal("Redis PUBLISH error: ", reply.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func joinPath(path *pb.Path) string {
|
||||
// path.Elem is empty for some reason so using path.Element instead
|
||||
return strings.Join(path.Element, "/")
|
||||
}
|
||||
|
||||
func convertUpdate(update *pb.Update) interface{} {
|
||||
switch update.Value.Type {
|
||||
case pb.Encoding_JSON:
|
||||
var value interface{}
|
||||
err := json.Unmarshal(update.Value.Value, &value)
|
||||
if err != nil {
|
||||
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
|
||||
}
|
||||
return value
|
||||
case pb.Encoding_BYTES:
|
||||
return update.Value.Value
|
||||
default:
|
||||
glog.Fatalf("Unhandled type of value %v in %s", update.Value.Type, update)
|
||||
return nil
|
||||
}
|
||||
}
|
12
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/README.md
generated
vendored
12
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/README.md
generated
vendored
@ -1,12 +0,0 @@
|
||||
# ocsplunk
|
||||
|
||||
Client for the gRPC OpenConfig service which subscribes to the configuration and
|
||||
state of a network device and sends it to the Splunk HTTP Event Collector.
|
||||
|
||||
## Sample usage
|
||||
|
||||
```
|
||||
ocsplunk -addr 10.0.1.2 -splunkurls https://splunk:8088 -splunktoken 00000000-0000-0000-0000-000000000000
|
||||
```
|
||||
|
||||

|
126
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/main.go
generated
vendored
126
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/main.go
generated
vendored
@ -1,126 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
"github.com/aristanetworks/splunk-hec-go"
|
||||
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
func exitWithError(s string) {
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
// gNMI options
|
||||
cfg := &gnmi.Config{}
|
||||
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
|
||||
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
|
||||
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
|
||||
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
|
||||
subscribePaths := flag.String("paths", "/", "Comma-separated list of paths to subscribe to")
|
||||
|
||||
// Splunk options
|
||||
splunkURLs := flag.String("splunkurls", "https://localhost:8088",
|
||||
"Comma-separated list of URLs of the Splunk servers")
|
||||
splunkToken := flag.String("splunktoken", "", "Token to connect to the Splunk servers")
|
||||
splunkIndex := flag.String("splunkindex", "", "Index for the data in Splunk")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
// gNMI connection
|
||||
ctx := gnmi.NewContext(context.Background(), cfg)
|
||||
// Store the address without the port so it can be used as the host in the Splunk event.
|
||||
addr := cfg.Addr
|
||||
client, err := gnmi.Dial(cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
// Splunk connection
|
||||
urls := strings.Split(*splunkURLs, ",")
|
||||
cluster := hec.NewCluster(urls, *splunkToken)
|
||||
cluster.SetHTTPClient(&http.Client{
|
||||
Transport: &http.Transport{
|
||||
// TODO: add flags for TLS
|
||||
TLSClientConfig: &tls.Config{
|
||||
// TODO: add flag to enable TLS
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// gNMI subscription
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
defer close(errChan)
|
||||
paths := strings.Split(*subscribePaths, ",")
|
||||
subscribeOptions := &gnmi.SubscribeOptions{
|
||||
Mode: "stream",
|
||||
StreamMode: "target_defined",
|
||||
Paths: gnmi.SplitPaths(paths),
|
||||
}
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
|
||||
// Forward subscribe responses to Splunk
|
||||
for {
|
||||
select {
|
||||
// We got a subscribe response
|
||||
case resp := <-respChan:
|
||||
response := resp.GetResponse()
|
||||
update, ok := response.(*pb.SubscribeResponse_Update)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert the response into a map[string]interface{}
|
||||
notification, err := gnmi.NotificationToMap(update.Update)
|
||||
if err != nil {
|
||||
exitWithError(err.Error())
|
||||
}
|
||||
|
||||
// Build the Splunk event
|
||||
path := notification["path"].(string)
|
||||
delete(notification, "path")
|
||||
timestamp := notification["timestamp"].(int64)
|
||||
delete(notification, "timestamp")
|
||||
// Should this be configurable?
|
||||
sourceType := "openconfig"
|
||||
event := &hec.Event{
|
||||
Host: &addr,
|
||||
Index: splunkIndex,
|
||||
Source: &path,
|
||||
SourceType: &sourceType,
|
||||
Event: notification,
|
||||
}
|
||||
event.SetTime(time.Unix(timestamp/1e9, timestamp%1e9))
|
||||
|
||||
// Write the event to Splunk
|
||||
if err := cluster.WriteEvent(event); err != nil {
|
||||
exitWithError("failed to write event: " + err.Error())
|
||||
}
|
||||
|
||||
// We got an error
|
||||
case err := <-errChan:
|
||||
exitWithError(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
BIN
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/preview.png
generated
vendored
BIN
vendor/github.com/aristanetworks/goarista/cmd/ocsplunk/preview.png
generated
vendored
Binary file not shown.
Before Width: | Height: | Size: 434 KiB |
56
vendor/github.com/aristanetworks/goarista/cmd/octsdb/README.md
generated
vendored
56
vendor/github.com/aristanetworks/goarista/cmd/octsdb/README.md
generated
vendored
@ -1,56 +0,0 @@
|
||||
# octsdb
|
||||
|
||||
This is a client for the OpenConfig gRPC interface that pushes telemetry to
|
||||
OpenTSDB. Non-numerical data isn't supported by OpenTSDB and is silently
|
||||
dropped.
|
||||
|
||||
This tool requires a config file to specify how to map the path of the
|
||||
notificatons coming out of the OpenConfig gRPC interface onto OpenTSDB
|
||||
metric names, and how to extract tags from the path.
|
||||
|
||||
## Getting Started
|
||||
To begin, a list of subscriptions is required (excerpt from `sampleconfig.json`):
|
||||
|
||||
```json
|
||||
"subscriptions": [
|
||||
"/Sysdb/interface/counter/eth/lag",
|
||||
"/Sysdb/interface/counter/eth/slice/phy",
|
||||
|
||||
"/Sysdb/environment/temperature/status",
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
|
||||
"/Sysdb/hardware/xcvr/status/all/xcvrStatus"
|
||||
],
|
||||
...
|
||||
```
|
||||
|
||||
Note that subscriptions should not end with a trailing `/` as that will cause
|
||||
the subscription to fail.
|
||||
|
||||
Afterwards, the metrics are defined (excerpt from `sampleconfig.json`):
|
||||
|
||||
```json
|
||||
"metrics": {
|
||||
"tempSensor": {
|
||||
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)"
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
In the metrics path, unnamed matched groups are used to make up the metric name, and named matched groups
|
||||
are used to extract optional tags. Note that unnamed groups are required, otherwise the metric
|
||||
name will be empty and the update will be silently dropped.
|
||||
|
||||
For example, using the above metrics path applied to an update for the path
|
||||
`/Sysdb/environment/temperature/status/tempSensor/TempSensor1/temperature`
|
||||
will lead to the metric name `environment.temperature` and tags `sensor=TempSensor1`.
|
||||
|
||||
## Usage
|
||||
|
||||
See the `-help` output, but here's an example to push all the metrics defined
|
||||
in the sample config file:
|
||||
```
|
||||
octsdb -addr <switch-hostname>:6042 -config sampleconfig.json -text | nc <tsd-hostname> 4242
|
||||
```
|
92
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config.go
generated
vendored
92
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config.go
generated
vendored
@ -1,92 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config is the representation of octsdb's JSON config file.
|
||||
type Config struct {
|
||||
// Prefixes to subscribe to.
|
||||
Subscriptions []string
|
||||
|
||||
// MetricPrefix, if set, is used to prefix all the metric names.
|
||||
MetricPrefix string
|
||||
|
||||
// Metrics to collect and how to munge them.
|
||||
Metrics map[string]*Metric
|
||||
}
|
||||
|
||||
// A Metric to collect and how to massage it into an OpenTSDB put.
|
||||
type Metric struct {
|
||||
// Path is a regexp to match on the Update's full path.
|
||||
// The regexp must be a prefix match.
|
||||
// The regexp can define named capture groups to use as tags.
|
||||
Path string
|
||||
|
||||
// Path compiled as a regexp.
|
||||
re *regexp.Regexp
|
||||
|
||||
// Additional tags to add to this metric.
|
||||
Tags map[string]string
|
||||
}
|
||||
|
||||
func loadConfig(path string) (*Config, error) {
|
||||
cfg, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to load config: %v", err)
|
||||
}
|
||||
config := new(Config)
|
||||
err = json.Unmarshal(cfg, config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse config: %v", err)
|
||||
}
|
||||
for _, metric := range config.Metrics {
|
||||
metric.re = regexp.MustCompile(metric.Path)
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Match applies this config to the given OpenConfig path.
|
||||
// If the path doesn't match anything in the config, an empty string
|
||||
// is returned as the metric name.
|
||||
func (c *Config) Match(path string) (metricName string, tags map[string]string) {
|
||||
tags = make(map[string]string)
|
||||
|
||||
for _, metric := range c.Metrics {
|
||||
found := metric.re.FindStringSubmatch(path)
|
||||
if found == nil {
|
||||
continue
|
||||
}
|
||||
for i, name := range metric.re.SubexpNames() {
|
||||
if i == 0 {
|
||||
continue
|
||||
} else if name == "" {
|
||||
if metricName != "" {
|
||||
metricName += "/"
|
||||
}
|
||||
metricName += found[i]
|
||||
} else {
|
||||
tags[name] = found[i]
|
||||
}
|
||||
}
|
||||
for tag, value := range metric.Tags {
|
||||
tags[tag] = value
|
||||
}
|
||||
break
|
||||
}
|
||||
if metricName != "" {
|
||||
metricName = strings.ToLower(strings.Replace(metricName, "/", ".", -1))
|
||||
if c.MetricPrefix != "" {
|
||||
metricName = c.MetricPrefix + "." + metricName
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
76
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config_test.go
generated
vendored
76
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config_test.go
generated
vendored
@ -1,76 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
)
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
cfg, err := loadConfig("/nonexistent.json")
|
||||
if err == nil {
|
||||
t.Fatal("Managed to load a nonexistent config!")
|
||||
}
|
||||
cfg, err = loadConfig("sampleconfig.json")
|
||||
if err != nil {
|
||||
t.Fatal("Failed to load config:", err)
|
||||
}
|
||||
|
||||
testcases := []struct {
|
||||
path string
|
||||
metric string
|
||||
tags map[string]string
|
||||
}{{
|
||||
path: "/Sysdb/environment/cooling/status/fan/Fan1/1/speed/value",
|
||||
metric: "eos.environment.fan.speed",
|
||||
tags: map[string]string{"fan": "Fan1/1"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/powerSupply/PowerSupply2/outputPower/value",
|
||||
metric: "eos.environment.power.output",
|
||||
tags: map[string]string{"sensor": "PowerSupply2"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/voltageSensor/VoltageSensor23/voltage/value",
|
||||
metric: "eos.environment.voltage",
|
||||
tags: map[string]string{"sensor": "VoltageSensor23"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/currentSensor/CurrentSensorP2/1/current/value",
|
||||
metric: "eos.environment.current",
|
||||
tags: map[string]string{"sensor": "CurrentSensorP2/1"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/temperature/status/tempSensor/" +
|
||||
"TempSensorP2/1/maxTemperature/value",
|
||||
metric: "eos.environment.maxtemperature",
|
||||
tags: map[string]string{"sensor": "TempSensorP2/1"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/lag/intfCounterDir/" +
|
||||
"Port-Channel201/intfCounter/current/statistics/outUcastPkts",
|
||||
metric: "eos.interface.pkt",
|
||||
tags: map[string]string{"intf": "Port-Channel201", "direction": "out", "type": "Ucast"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
"Ethernet42/intfCounter/current/statistics/inUcastPkts",
|
||||
metric: "eos.interface.pkt",
|
||||
tags: map[string]string{"intf": "Ethernet42", "direction": "in", "type": "Ucast"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
"Ethernet42/intfCounter/lastClear/statistics/inErrors",
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
"Ethernet42/intfCounter/current/ethStatistics/outPfcClassFrames",
|
||||
metric: "eos.interface.pfcclassframes",
|
||||
tags: map[string]string{"intf": "Ethernet42", "direction": "out"},
|
||||
}}
|
||||
for i, tcase := range testcases {
|
||||
actualMetric, actualTags := cfg.Match(tcase.path)
|
||||
if actualMetric != tcase.metric {
|
||||
t.Errorf("#%d expected metric %q but got %q", i, tcase.metric, actualMetric)
|
||||
}
|
||||
if d := test.Diff(tcase.tags, actualTags); actualMetric != "" && d != "" {
|
||||
t.Errorf("#%d expected tags %q but got %q: %s", i, tcase.tags, actualTags, d)
|
||||
}
|
||||
}
|
||||
}
|
228
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main.go
generated
vendored
228
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main.go
generated
vendored
@ -1,228 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// The octsdb tool pushes OpenConfig telemetry to OpenTSDB.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
// gNMI options
|
||||
cfg := &gnmi.Config{}
|
||||
flag.StringVar(&cfg.Addr, "addr", "localhost", "gNMI gRPC server `address`")
|
||||
flag.StringVar(&cfg.CAFile, "cafile", "", "Path to server TLS certificate file")
|
||||
flag.StringVar(&cfg.CertFile, "certfile", "", "Path to client TLS certificate file")
|
||||
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
|
||||
|
||||
// Program options
|
||||
subscribePaths := flag.String("paths", "/", "Comma-separated list of paths to subscribe to")
|
||||
|
||||
tsdbFlag := flag.String("tsdb", "",
|
||||
"Address of the OpenTSDB server where to push telemetry to")
|
||||
textFlag := flag.Bool("text", false,
|
||||
"Print the output as simple text")
|
||||
configFlag := flag.String("config", "",
|
||||
"Config to turn OpenConfig telemetry into OpenTSDB put requests")
|
||||
isUDPServerFlag := flag.Bool("isudpserver", false,
|
||||
"Set to true to run as a UDP to TCP to OpenTSDB server.")
|
||||
udpAddrFlag := flag.String("udpaddr", "",
|
||||
"Address of the UDP server to connect to/serve on.")
|
||||
parityFlag := flag.Int("parityshards", 0,
|
||||
"Number of parity shards for the Reed Solomon Erasure Coding used for UDP."+
|
||||
" Clients and servers should have the same number.")
|
||||
udpTimeoutFlag := flag.Duration("udptimeout", 2*time.Second,
|
||||
"Timeout for each")
|
||||
|
||||
flag.Parse()
|
||||
if !(*tsdbFlag != "" || *textFlag || *udpAddrFlag != "") {
|
||||
glog.Fatal("Specify the address of the OpenTSDB server to write to with -tsdb")
|
||||
} else if *configFlag == "" {
|
||||
glog.Fatal("Specify a JSON configuration file with -config")
|
||||
}
|
||||
|
||||
config, err := loadConfig(*configFlag)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
// Ignore the default "subscribe-to-everything" subscription of the
|
||||
// -subscribe flag.
|
||||
subscriptions := strings.Split(*subscribePaths, ",")
|
||||
if subscriptions[0] == "" {
|
||||
subscriptions = subscriptions[1:]
|
||||
}
|
||||
// Add the subscriptions from the config file.
|
||||
subscriptions = append(subscriptions, config.Subscriptions...)
|
||||
|
||||
// Run a UDP server that forwards messages to OpenTSDB via Telnet (TCP)
|
||||
if *isUDPServerFlag {
|
||||
if *udpAddrFlag == "" {
|
||||
glog.Fatal("Specify the address for the UDP server to listen on with -udpaddr")
|
||||
}
|
||||
server, err := newUDPServer(*udpAddrFlag, *tsdbFlag, *parityFlag)
|
||||
if err != nil {
|
||||
glog.Fatal("Failed to create UDP server: ", err)
|
||||
}
|
||||
glog.Fatal(server.Run())
|
||||
}
|
||||
|
||||
var c OpenTSDBConn
|
||||
if *textFlag {
|
||||
c = newTextDumper()
|
||||
} else if *udpAddrFlag != "" {
|
||||
c = newUDPClient(*udpAddrFlag, *parityFlag, *udpTimeoutFlag)
|
||||
} else {
|
||||
// TODO: support HTTP(S).
|
||||
c = newTelnetClient(*tsdbFlag)
|
||||
}
|
||||
ctx := gnmi.NewContext(context.Background(), cfg)
|
||||
client, err := gnmi.Dial(cfg)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
subscribeOptions := &gnmi.SubscribeOptions{
|
||||
Mode: "stream",
|
||||
StreamMode: "target_defined",
|
||||
Paths: gnmi.SplitPaths(subscriptions),
|
||||
}
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
for {
|
||||
select {
|
||||
case resp := <-respChan:
|
||||
pushToOpenTSDB(cfg.Addr, c, config, resp.GetUpdate())
|
||||
case err := <-errChan:
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pushToOpenTSDB(addr string, conn OpenTSDBConn, config *Config, notif *pb.Notification) {
|
||||
if notif == nil {
|
||||
glog.Error("Nil notification ignored")
|
||||
return
|
||||
}
|
||||
if notif.Timestamp <= 0 {
|
||||
glog.Fatalf("Invalid timestamp %d in %s", notif.Timestamp, notif)
|
||||
}
|
||||
host := addr[:strings.IndexRune(addr, ':')]
|
||||
if host == "localhost" {
|
||||
// TODO: On Linux this reads /proc/sys/kernel/hostname each time,
|
||||
// which isn't the most efficient, but at least we don't have to
|
||||
// deal with detecting hostname changes.
|
||||
host, _ = os.Hostname()
|
||||
if host == "" {
|
||||
glog.Info("could not figure out localhost's hostname")
|
||||
return
|
||||
}
|
||||
}
|
||||
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
|
||||
for _, update := range notif.Update {
|
||||
if update.Value == nil || update.Value.Type != pb.Encoding_JSON {
|
||||
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
|
||||
continue
|
||||
}
|
||||
value := parseValue(update)
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
path := prefix + "/" + strings.Join(update.Path.Element, "/")
|
||||
metricName, tags := config.Match(path)
|
||||
if metricName == "" {
|
||||
glog.V(8).Infof("Ignoring unmatched update at %s: %+v", path, update.Value)
|
||||
continue
|
||||
}
|
||||
tags["host"] = host
|
||||
for i, v := range value {
|
||||
if len(value) > 1 {
|
||||
tags["index"] = strconv.Itoa(i)
|
||||
}
|
||||
err := conn.Put(&DataPoint{
|
||||
Metric: metricName,
|
||||
Timestamp: uint64(notif.Timestamp),
|
||||
Value: v,
|
||||
Tags: tags,
|
||||
})
|
||||
if err != nil {
|
||||
glog.Info("Failed to put datapoint: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseValue returns either an integer/floating point value of the given update, or if
|
||||
// the value is a slice of integers/floating point values. If the value is neither of these
|
||||
// or if any element in the slice is non numerical, parseValue returns nil.
|
||||
func parseValue(update *pb.Update) []interface{} {
|
||||
var value interface{}
|
||||
|
||||
decoder := json.NewDecoder(bytes.NewReader(update.Value.Value))
|
||||
decoder.UseNumber()
|
||||
err := decoder.Decode(&value)
|
||||
if err != nil {
|
||||
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
|
||||
}
|
||||
|
||||
switch value := value.(type) {
|
||||
case json.Number:
|
||||
return []interface{}{parseNumber(value, update)}
|
||||
case []interface{}:
|
||||
for i, val := range value {
|
||||
jsonNum, ok := val.(json.Number)
|
||||
if !ok {
|
||||
// If any value is not a number, skip it.
|
||||
glog.Infof("Element %d: %v is %T, not json.Number", i, val, val)
|
||||
continue
|
||||
}
|
||||
num := parseNumber(jsonNum, update)
|
||||
value[i] = num
|
||||
}
|
||||
return value
|
||||
case map[string]interface{}:
|
||||
// Special case for simple value types that just have a "value"
|
||||
// attribute (common case).
|
||||
if val, ok := value["value"].(json.Number); ok && len(value) == 1 {
|
||||
return []interface{}{parseNumber(val, update)}
|
||||
}
|
||||
default:
|
||||
glog.V(9).Infof("Ignoring non-numeric or non-numeric slice value in %s", update)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert our json.Number to either an int64, uint64, or float64.
|
||||
func parseNumber(num json.Number, update *pb.Update) interface{} {
|
||||
var value interface{}
|
||||
var err error
|
||||
if value, err = num.Int64(); err != nil {
|
||||
// num is either a large unsigned integer or a floating point.
|
||||
if strings.Contains(err.Error(), "value out of range") { // Sigh.
|
||||
value, err = strconv.ParseUint(num.String(), 10, 64)
|
||||
} else {
|
||||
value, err = num.Float64()
|
||||
if err != nil {
|
||||
glog.Fatalf("Malformed JSON number %q in %s", num, update)
|
||||
}
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
46
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main_test.go
generated
vendored
46
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main_test.go
generated
vendored
@ -1,46 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
func TestParseValue(t *testing.T) { // Because parsing JSON sucks.
|
||||
testcases := []struct {
|
||||
input string
|
||||
expected interface{}
|
||||
}{
|
||||
{"42", []interface{}{int64(42)}},
|
||||
{"-42", []interface{}{int64(-42)}},
|
||||
{"42.42", []interface{}{float64(42.42)}},
|
||||
{"-42.42", []interface{}{float64(-42.42)}},
|
||||
{`"foo"`, []interface{}(nil)},
|
||||
{"9223372036854775807", []interface{}{int64(math.MaxInt64)}},
|
||||
{"-9223372036854775808", []interface{}{int64(math.MinInt64)}},
|
||||
{"9223372036854775808", []interface{}{uint64(math.MaxInt64) + 1}},
|
||||
{"[1,3,5,7,9]", []interface{}{int64(1), int64(3), int64(5), int64(7), int64(9)}},
|
||||
{"[1,9223372036854775808,0,-9223372036854775808]", []interface{}{
|
||||
int64(1),
|
||||
uint64(math.MaxInt64) + 1,
|
||||
int64(0),
|
||||
int64(math.MinInt64)},
|
||||
},
|
||||
}
|
||||
for i, tcase := range testcases {
|
||||
actual := parseValue(&pb.Update{
|
||||
Value: &pb.Value{
|
||||
Value: []byte(tcase.input),
|
||||
},
|
||||
})
|
||||
if d := test.Diff(tcase.expected, actual); d != "" {
|
||||
t.Errorf("#%d: %s: %#v vs %#v", i, d, tcase.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
54
vendor/github.com/aristanetworks/goarista/cmd/octsdb/sampleconfig.json
generated
vendored
54
vendor/github.com/aristanetworks/goarista/cmd/octsdb/sampleconfig.json
generated
vendored
@ -1,54 +0,0 @@
|
||||
{
|
||||
"comment": "This is a sample configuration for EOS versions below 4.20",
|
||||
"subscriptions": [
|
||||
"/Sysdb/interface/counter/eth/lag",
|
||||
"/Sysdb/interface/counter/eth/slice/phy",
|
||||
|
||||
"/Sysdb/environment/temperature/status",
|
||||
"/Sysdb/environment/cooling/status",
|
||||
"/Sysdb/environment/power/status",
|
||||
|
||||
"/Sysdb/hardware/xcvr/status/all/xcvrStatus"
|
||||
],
|
||||
"metricPrefix": "eos",
|
||||
"metrics": {
|
||||
"intfCounter": {
|
||||
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
|
||||
},
|
||||
"intfPktCounter": {
|
||||
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
|
||||
},
|
||||
"intfPfcClassCounter": {
|
||||
"path": "/Sysdb/(interface)/counter/eth/(?:lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter/current/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)"
|
||||
},
|
||||
|
||||
"tempSensor": {
|
||||
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/((?:maxT|t)emperature)"
|
||||
},
|
||||
"tempSensorAlert": {
|
||||
"path": "/Sysdb/(environment)/temperature/status/tempSensor/(?P<sensor>.+)/(alertRaisedCount)"
|
||||
},
|
||||
"currentSensor": {
|
||||
"path": "/Sysdb/(environment)/power/status/currentSensor/(?P<sensor>.+)/(current)"
|
||||
},
|
||||
"powerSensor": {
|
||||
"path": "/Sysdb/(environment/power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power"
|
||||
},
|
||||
"voltageSensor": {
|
||||
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(voltage)"
|
||||
},
|
||||
"railCurrentSensor": {
|
||||
"path": "/Sysdb/(environment)/power/status/voltageSensor/(?P<sensor>.+)/(current)"
|
||||
},
|
||||
"fanSpeed": {
|
||||
"path": "/Sysdb/(environment)/cooling/status/(fan)/(?P<fan>.+)/(speed)"
|
||||
},
|
||||
|
||||
"qsfpRxPower": {
|
||||
"path": "/Sysdb/hardware/(xcvr)/status/all/xcvrStatus/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
|
||||
},
|
||||
"sfpTemperature": {
|
||||
"path": "/Sysdb/hardware/(xcvr)/status/all/xcvrStatus/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
|
||||
}
|
||||
}
|
||||
}
|
66
vendor/github.com/aristanetworks/goarista/cmd/octsdb/sampleconfig4.20.json
generated
vendored
66
vendor/github.com/aristanetworks/goarista/cmd/octsdb/sampleconfig4.20.json
generated
vendored
@ -1,66 +0,0 @@
|
||||
{
|
||||
"comment": "This is a sample configuration for EOS versions above 4.20",
|
||||
"subscriptions": [
|
||||
"/Smash/counters/ethIntf",
|
||||
"/Smash/interface/counter/lag/current/counter",
|
||||
|
||||
"/Sysdb/environment/archer/cooling/status",
|
||||
"/Sysdb/environment/archer/power/status",
|
||||
"/Sysdb/environment/archer/temperature/status",
|
||||
|
||||
"/Sysdb/hardware/archer/xcvr/status"
|
||||
],
|
||||
"metricPrefix": "eos",
|
||||
"metrics": {
|
||||
"intfCounter": {
|
||||
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
|
||||
},
|
||||
"intfLagCounter": {
|
||||
"path": "/Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(Octets|Errors|Discards)"
|
||||
},
|
||||
"intfPktCounter": {
|
||||
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
|
||||
},
|
||||
"intfLagPktCounter": {
|
||||
"path": "/Smash/interface/counter/lag/current/(counter)/(?P<intf>.+)/statistics/(?P<direction>(?:in|out))(?P<type>(?:Ucast|Multicast|Broadcast))(Pkt)"
|
||||
},
|
||||
"intfPfcClassCounter": {
|
||||
"path": "/Smash/counters/ethIntf/FocalPointV2/current/(counter)/(?P<intf>.+)/ethStatistics/(?P<direction>(?:in|out))(PfcClassFrames)"
|
||||
},
|
||||
|
||||
"tempSensor": {
|
||||
"path": "/Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/((?:maxT|t)emperature)"
|
||||
},
|
||||
"tempSensorAlert": {
|
||||
"path": "/Sysdb/(environment)/archer/temperature/status/(?P<sensor>.+)/(alertRaisedCount)"
|
||||
},
|
||||
"currentSensor": {
|
||||
"path": "/Sysdb/(environment)/archer/power/status/currentSensor/(?P<sensor>.+)/(current)"
|
||||
},
|
||||
"powerSensor": {
|
||||
"path": "/Sysdb/(environment)/archer/(power)/status/powerSupply/(?P<sensor>.+)/(input|output)Power"
|
||||
},
|
||||
"voltageSensor": {
|
||||
"path": "/Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(voltage)"
|
||||
},
|
||||
"railCurrentSensor": {
|
||||
"path": "/Sysdb/(environment)/archer/power/status/voltageSensor/(?:cell/.+|system)/(?P<sensor>.+)/(current)"
|
||||
},
|
||||
"fanSpeed": {
|
||||
"path": "/Sysdb/(environment)/archer/(cooling)/status/(?P<fan>.+)/speed"
|
||||
},
|
||||
|
||||
"qsfpModularRxPower": {
|
||||
"path": "/Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
|
||||
},
|
||||
"qsfpFixedRxPower": {
|
||||
"path": "/Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/domRegisterData/lane(?P<lane>\\d)(OpticalRxPower)"
|
||||
},
|
||||
"sfpModularTemperature": {
|
||||
"path": "/Sysdb/hardware/archer/(xcvr)/status/slice/(?P<linecard>.+)/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
|
||||
},
|
||||
"sfpFixedTemperature": {
|
||||
"path": "/Sysdb/hardware/archer/(xcvr)/status/all/(?P<intf>.+)/lastDomUpdateTime/(temperature)"
|
||||
}
|
||||
}
|
||||
}
|
65
vendor/github.com/aristanetworks/goarista/cmd/octsdb/telnet.go
generated
vendored
65
vendor/github.com/aristanetworks/goarista/cmd/octsdb/telnet.go
generated
vendored
@ -1,65 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
)
|
||||
|
||||
type telnetClient struct {
|
||||
addr string
|
||||
conn net.Conn
|
||||
}
|
||||
|
||||
func newTelnetClient(addr string) OpenTSDBConn {
|
||||
return &telnetClient{
|
||||
addr: addr,
|
||||
}
|
||||
}
|
||||
|
||||
func readErrors(conn net.Conn) {
|
||||
var buf [4096]byte
|
||||
for {
|
||||
// TODO: We should add a buffer to read line-by-line properly instead
|
||||
// of using a fixed-size buffer and splitting on newlines manually.
|
||||
n, err := conn.Read(buf[:])
|
||||
if n == 0 {
|
||||
return
|
||||
} else if n > 0 {
|
||||
for _, line := range bytes.Split(buf[:n], []byte{'\n'}) {
|
||||
if s := string(line); s != "" {
|
||||
glog.Info("tsd replied: ", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *telnetClient) Put(d *DataPoint) error {
|
||||
return c.PutBytes([]byte(d.String()))
|
||||
}
|
||||
|
||||
func (c *telnetClient) PutBytes(d []byte) error {
|
||||
var err error
|
||||
if c.conn == nil {
|
||||
c.conn, err = net.Dial("tcp", c.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go readErrors(c.conn)
|
||||
}
|
||||
_, err = c.conn.Write(d)
|
||||
if err != nil {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
return err
|
||||
}
|
16
vendor/github.com/aristanetworks/goarista/cmd/octsdb/text.go
generated
vendored
16
vendor/github.com/aristanetworks/goarista/cmd/octsdb/text.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
type textDumper struct{}
|
||||
|
||||
func newTextDumper() OpenTSDBConn {
|
||||
return textDumper{}
|
||||
}
|
||||
|
||||
func (t textDumper) Put(d *DataPoint) error {
|
||||
print(d.String())
|
||||
return nil
|
||||
}
|
37
vendor/github.com/aristanetworks/goarista/cmd/octsdb/tsdb.go
generated
vendored
37
vendor/github.com/aristanetworks/goarista/cmd/octsdb/tsdb.go
generated
vendored
@ -1,37 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
|
||||
// DataPoint for OpenTSDB to store.
|
||||
type DataPoint struct {
|
||||
// Metric name.
|
||||
Metric string `json:"metric"`
|
||||
|
||||
// UNIX timestamp with millisecond resolution.
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
|
||||
// Value of the data point (integer or floating point).
|
||||
Value interface{} `json:"value"`
|
||||
|
||||
// Tags. The host is automatically populated by the OpenTSDBConn.
|
||||
Tags map[string]string `json:"tags"`
|
||||
}
|
||||
|
||||
func (d *DataPoint) String() string {
|
||||
var tags string
|
||||
if len(d.Tags) != 0 {
|
||||
for tag, value := range d.Tags {
|
||||
tags += " " + tag + "=" + value
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("put %s %d %#v%s\n", d.Metric, d.Timestamp/1e9, d.Value, tags)
|
||||
}
|
||||
|
||||
// OpenTSDBConn is a managed connection to an OpenTSDB instance (or cluster).
|
||||
type OpenTSDBConn interface {
|
||||
Put(d *DataPoint) error
|
||||
}
|
104
vendor/github.com/aristanetworks/goarista/cmd/octsdb/udp.go
generated
vendored
104
vendor/github.com/aristanetworks/goarista/cmd/octsdb/udp.go
generated
vendored
@ -1,104 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
kcp "github.com/xtaci/kcp-go"
|
||||
)
|
||||
|
||||
type udpClient struct {
|
||||
addr string
|
||||
conn *kcp.UDPSession
|
||||
parity int
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func newUDPClient(addr string, parity int, timeout time.Duration) OpenTSDBConn {
|
||||
return &udpClient{
|
||||
addr: addr,
|
||||
parity: parity,
|
||||
timeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *udpClient) Put(d *DataPoint) error {
|
||||
var err error
|
||||
if c.conn == nil {
|
||||
// Prevent a bunch of clients all disconnecting and attempting to reconnect
|
||||
// at nearly the same time.
|
||||
time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
|
||||
|
||||
c.conn, err = kcp.DialWithOptions(c.addr, nil, 10, c.parity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
|
||||
}
|
||||
|
||||
dStr := d.String()
|
||||
glog.V(3).Info(dStr)
|
||||
|
||||
c.conn.SetWriteDeadline(time.Now().Add(c.timeout))
|
||||
_, err = c.conn.Write([]byte(dStr))
|
||||
if err != nil {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type udpServer struct {
|
||||
lis *kcp.Listener
|
||||
telnet *telnetClient
|
||||
}
|
||||
|
||||
func newUDPServer(udpAddr, tsdbAddr string, parity int) (*udpServer, error) {
|
||||
lis, err := kcp.ListenWithOptions(udpAddr, nil, 10, parity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &udpServer{
|
||||
lis: lis,
|
||||
telnet: newTelnetClient(tsdbAddr).(*telnetClient),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *udpServer) Run() error {
|
||||
for {
|
||||
conn, err := c.lis.AcceptKCP()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conn.SetNoDelay(1, 40, 1, 1) // Suggested by kcp-go to lower cpu usage
|
||||
if glog.V(3) {
|
||||
glog.Infof("New connection from %s", conn.RemoteAddr())
|
||||
}
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
var buf [4096]byte
|
||||
for {
|
||||
n, err := conn.Read(buf[:])
|
||||
if err != nil {
|
||||
if n != 0 { // Not EOF
|
||||
glog.Error(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if glog.V(3) {
|
||||
glog.Info(string(buf[:n]))
|
||||
}
|
||||
err = c.telnet.PutBytes(buf[:n])
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
3
vendor/github.com/aristanetworks/goarista/cmd/openconfigbeat/README.md
generated
vendored
3
vendor/github.com/aristanetworks/goarista/cmd/openconfigbeat/README.md
generated
vendored
@ -1,3 +0,0 @@
|
||||
# openconfigbeat
|
||||
|
||||
The code for `openconfigbeat` lives at [aristanetworks/openconfigbeat](https://github.com/aristanetworks/openconfigbeat).
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user