forked from cerc-io/ipld-eth-server

Update Geth to 1.9.0

parent a3b55bfd56
commit 987abd4b2e

402 Gopkg.lock generated
@ -2,73 +2,100 @@
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:48a213e9dc4880bbbd6999309a476fa4d3cc67560aa7127154cf8ea95bd464c2"
|
||||
name = "github.com/allegro/bigcache"
|
||||
packages = [
|
||||
".",
|
||||
"queue",
|
||||
"queue"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068"
|
||||
version = "v1.1.0"
|
||||
revision = "69ea0af04088faa57adb9ac683934277141e92a5"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a313376bcbcce8ae8bddb8089a7293e0473a0f8e9e3710d6244e09e81875ccf0"
|
||||
name = "github.com/aristanetworks/goarista"
|
||||
packages = ["monotime"]
|
||||
pruneopts = ""
|
||||
revision = "ff33da284e760fcdb03c33d37a719e5ed30ba844"
|
||||
revision = "ed1100a1c0154be237da0078e86b19c523c8c661"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c6bf1ac7bbc0fe51637bf54d5a88ff79b171b3b42dbc665dec98303c862d8662"
|
||||
name = "github.com/btcsuite/btcd"
|
||||
packages = ["btcec"]
|
||||
pruneopts = ""
|
||||
revision = "cff30e1d23fc9e800b2b5b4b41ef1817dda07e9f"
|
||||
revision = "c26ffa870fd817666a857af1bf6498fabba1ffe3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5d47691333460db6ac83ced03c79b4bdb9aff3e322be24affb7855bed8affc6c"
|
||||
name = "github.com/dave/jennifer"
|
||||
packages = ["jen"]
|
||||
pruneopts = ""
|
||||
revision = "14e399b6b5e8456c66c45c955fc27b568bacb5c9"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:aaeffbff5bd24654cb4c190ed75d6c7b57b4f5d6741914c1a7a6bb7447e756c5"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/deckarep/golang-set"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e"
|
||||
version = "v1.7.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:90d36f5b581e95e00ced808cd48824ed6c320c25887828cce461bdef4cb7bc7c"
|
||||
name = "github.com/edsrzf/mmap-go"
|
||||
packages = ["."]
|
||||
revision = "188cc3b666ba704534fa4f96e9e61f21f1e1ba7c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/elastic/gosigar"
|
||||
packages = [
|
||||
".",
|
||||
"sys/windows"
|
||||
]
|
||||
revision = "f75810decf6f4d88b130bfc4d2ba7ccdcea0c01d"
|
||||
version = "v0.10.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ethereum/go-ethereum"
|
||||
packages = [
|
||||
".",
|
||||
"accounts",
|
||||
"accounts/abi",
|
||||
"accounts/abi/bind",
|
||||
"accounts/external",
|
||||
"accounts/keystore",
|
||||
"accounts/scwallet",
|
||||
"accounts/usbwallet",
|
||||
"accounts/usbwallet/trezor",
|
||||
"common",
|
||||
"common/bitutil",
|
||||
"common/hexutil",
|
||||
"common/math",
|
||||
"common/mclock",
|
||||
"common/prque",
|
||||
"consensus",
|
||||
"consensus/clique",
|
||||
"consensus/ethash",
|
||||
"consensus/misc",
|
||||
"core",
|
||||
"core/bloombits",
|
||||
"core/rawdb",
|
||||
"core/state",
|
||||
"core/types",
|
||||
"core/vm",
|
||||
"crypto",
|
||||
"crypto/bn256",
|
||||
"crypto/bn256/cloudflare",
|
||||
"crypto/bn256/google",
|
||||
"crypto/ecies",
|
||||
"crypto/secp256k1",
|
||||
"eth/downloader",
|
||||
"ethclient",
|
||||
"ethdb",
|
||||
"ethdb/leveldb",
|
||||
"ethdb/memorydb",
|
||||
"event",
|
||||
"internal/ethapi",
|
||||
"log",
|
||||
"metrics",
|
||||
"p2p",
|
||||
@ -81,64 +108,62 @@
|
||||
"params",
|
||||
"rlp",
|
||||
"rpc",
|
||||
"trie",
|
||||
"signer/core",
|
||||
"signer/storage",
|
||||
"trie"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "cd79bc61a983d6482579d12cdd239b37bbfa12ef"
|
||||
revision = "52f2461774bcb8cdd310f86b4bc501df5b783852"
|
||||
version = "v1.9.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
|
||||
name = "github.com/fsnotify/fsnotify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a01080d20c45c031c13f3828c56e58f4f51d926a482ad10cc0316225097eb7ea"
|
||||
branch = "master"
|
||||
name = "github.com/gballet/go-libpcsclite"
|
||||
packages = ["."]
|
||||
revision = "2772fd86a8ff4306d2749f610a386bfee9e0d727"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-stack/stack"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = ""
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
packages = [
|
||||
"proto",
|
||||
"protoc-gen-go/descriptor"
|
||||
]
|
||||
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||
revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
|
||||
version = "v0.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5247b135b5492aa232a731acdcb52b08f32b874cb398f21ab460396eadbe866b"
|
||||
name = "github.com/google/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494"
|
||||
version = "v1.0.0"
|
||||
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [
|
||||
".",
|
||||
"simplelru",
|
||||
"simplelru"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
|
||||
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
|
||||
version = "v0.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d14365c51dd1d34d5c79833ec91413bfbb166be978724f15701e17080dc06dec"
|
||||
name = "github.com/hashicorp/hcl"
|
||||
packages = [
|
||||
".",
|
||||
@ -150,29 +175,24 @@
|
||||
"hcl/token",
|
||||
"json/parser",
|
||||
"json/scanner",
|
||||
"json/token",
|
||||
"json/token"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b3c5b95e56c06f5aa72cb2500e6ee5f44fcd122872d4fec2023a488e561218bc"
|
||||
name = "github.com/hpcloud/tail"
|
||||
packages = [
|
||||
".",
|
||||
"ratelimiter",
|
||||
"util",
|
||||
"watch",
|
||||
"winfile",
|
||||
"winfile"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b6e4cc26365c004808649862e22069de09594a9222143399a7a04904e9f7018c"
|
||||
name = "github.com/huin/goupnp"
|
||||
packages = [
|
||||
".",
|
||||
@ -181,83 +201,85 @@
|
||||
"httpu",
|
||||
"scpd",
|
||||
"soap",
|
||||
"ssdp",
|
||||
"ssdp"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1395d1447324cbea88d249fbfcfd70ea878fdfca"
|
||||
revision = "656e61dfadd241c7cbdd22a023fa81ecb6860ea8"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:76f836364ae83ed811c415aa92e1209ce49de9f62aad85b85fca749a8b96a110"
|
||||
name = "github.com/jackpal/go-nat-pmp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c9cfead9f2a36ddf3daa40ba269aa7f4bbba6b62"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:617ee2434b77e911fa26b678730be9a617f75243b194eadc8201c8ac860844aa"
|
||||
name = "github.com/jmoiron/sqlx"
|
||||
packages = [
|
||||
".",
|
||||
"reflectx",
|
||||
"reflectx"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "0dae4fefe7c0e190f7b5a78dac28a1c82cc8d849"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6a874e3ddfb9db2b42bd8c85b6875407c702fa868eed20634ff489bc896ccfd3"
|
||||
name = "github.com/konsorten/go-windows-terminal-sequences"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
|
||||
version = "v1.0.1"
|
||||
revision = "38398a30ed8516ffda617a04c822de09df8a3ec5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:29145d7af4adafd72a79df5e41456ac9e232d5a28c1cd4dacf3ff008a217fc10"
|
||||
name = "github.com/karalabe/usb"
|
||||
packages = ["."]
|
||||
revision = "9be757f914c0907b7ddd561ea86eec15313ac022"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/konsorten/go-windows-terminal-sequences"
|
||||
packages = ["."]
|
||||
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/lib/pq"
|
||||
packages = [
|
||||
".",
|
||||
"oid",
|
||||
"scram"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4ded0e9383f75c197b3a2aaa6d590ac52df6fd79"
|
||||
revision = "3427c32cb71afc948325f299f040e53c1dd78979"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:961dc3b1d11f969370533390fdf203813162980c858e1dabe827b60940c909a5"
|
||||
name = "github.com/magiconair/properties"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2353362d570a7bfa228149c62842019201cfb71"
|
||||
version = "v1.8.0"
|
||||
revision = "de8848e004dd33dc07a2947b3d76f618a7fc7ef1"
|
||||
version = "v1.8.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
packages = ["."]
|
||||
revision = "3ee7d812e62a0804a7d0a324e0249ca2db3476d3"
|
||||
version = "v0.0.4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4"
|
||||
version = "v1.0.0"
|
||||
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5219b4506253ccc598f9340677162a42d6a78f340a4cc6df2d62db4d0593c4e9"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8"
|
||||
version = "v1.0.0"
|
||||
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
|
||||
version = "v1.1.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/olekukonko/tablewriter"
|
||||
packages = ["."]
|
||||
revision = "e6d60cf7ba1f42d86d54cdf5508611c4aafb3970"
|
||||
version = "v0.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a7fd918fb5bd2188436785c0424f8a50b4addfedf37a2b14d796be2a927b8007"
|
||||
name = "github.com/onsi/ginkgo"
|
||||
packages = [
|
||||
".",
|
||||
@ -277,14 +299,12 @@
|
||||
"reporters/stenographer",
|
||||
"reporters/stenographer/support/go-colorable",
|
||||
"reporters/stenographer/support/go-isatty",
|
||||
"types",
|
||||
"types"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "3774a09d95489ccaa16032e0770d08ea77ba6184"
|
||||
version = "v1.6.0"
|
||||
revision = "eea6ad008b96acdaa524f5b409513bf062b500ad"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3ecd0a37c4a90c12a97e31c398cdbc173824351aa891898ee178120bfe71c478"
|
||||
name = "github.com/onsi/gomega"
|
||||
packages = [
|
||||
".",
|
||||
@ -299,122 +319,117 @@
|
||||
"matchers/support/goraph/edge",
|
||||
"matchers/support/goraph/node",
|
||||
"matchers/support/goraph/util",
|
||||
"types",
|
||||
"types"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "7615b9433f86a8bdf29709bf288bc4fd0636a369"
|
||||
version = "v1.4.2"
|
||||
revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a5484d4fa43127138ae6e7b2299a6a52ae006c7f803d98d717f60abf3e97192e"
|
||||
name = "github.com/pborman/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
|
||||
version = "v1.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:894aef961c056b6d85d12bac890bf60c44e99b46292888bfa66caf529f804457"
|
||||
name = "github.com/pelletier/go-toml"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
|
||||
version = "v1.2.0"
|
||||
revision = "728039f679cbcd4f6a54e080d2219a4c4928c546"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
|
||||
version = "v0.8.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fdbe7e05d74cc4d175cc4515a7807a5bb8b66ebe130da382b99713c9038648ae"
|
||||
name = "github.com/pressly/goose"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "e4b98955473e91a12fc7d8816c28d06376d1d92c"
|
||||
version = "v2.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7143292549152d009ca9e9c493b74736a2ebd93f921bea8a4b308d7cc5edc6b3"
|
||||
name = "github.com/rjeczalik/notify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0f065fa99b48b842c3fd3e2c8b194c6f2b69f6b8"
|
||||
name = "github.com/prometheus/tsdb"
|
||||
packages = ["fileutil"]
|
||||
revision = "d230c67aa180850b80ae49e07079f55df1da0502"
|
||||
version = "v0.9.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:78c9cf43ddeacd0e472f412082227a0fac2ae107ee60e9112156f9371f9912cf"
|
||||
name = "github.com/rjeczalik/notify"
|
||||
packages = ["."]
|
||||
revision = "69d839f37b13a8cb7a78366f7633a4071cb43be7"
|
||||
version = "v0.9.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/rs/cors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3fb1b69b103a84de38a19c3c6ec073dd6caa4d3f"
|
||||
version = "v1.5.0"
|
||||
revision = "9a47f48565a795472d43519dd49aac781f3034fb"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9d57e200ef5ccc4217fe0a34287308bac652435e7c6513f6263e0493d2245c56"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
|
||||
version = "v1.2.0"
|
||||
revision = "839c75faf7f98a33d445d181f3018b5c3409a45e"
|
||||
version = "v1.4.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d0431c2fd72e39ee43ea7742322abbc200c3e704c9102c5c3c2e2e667095b0ca"
|
||||
name = "github.com/spf13/afero"
|
||||
packages = [
|
||||
".",
|
||||
"mem",
|
||||
"mem"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
|
||||
version = "v1.1.2"
|
||||
revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d0b38ba6da419a6d4380700218eeec8623841d44a856bb57369c172fbf692ab4"
|
||||
name = "github.com/spf13/cast"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "8965335b8c7107321228e3e3702cab9832751bac"
|
||||
version = "v1.2.0"
|
||||
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a1403cc8a94b8d7956ee5e9694badef0e7b051af289caad1cf668331e3ffa4f6"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5"
|
||||
version = "v0.0.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9ceffa4ab5f7195ecf18b3a7fff90c837a9ed5e22e66d18069e4bccfe1f52aa0"
|
||||
name = "github.com/spf13/jwalterweatherman"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
|
||||
version = "v1.0.0"
|
||||
revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0a52bcb568386d98f4894575d53ce3e456f56471de6897bb8b9de13c33d9340e"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
|
||||
version = "v1.0.2"
|
||||
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac25ea6cc1156aca9611411274b4a0bdd83a623845df6985aab508253955cc66"
|
||||
name = "github.com/spf13/viper"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "8fb642006536c8d3760c99d4fa2389f5e2205631"
|
||||
version = "v1.2.0"
|
||||
revision = "b5bf975e5823809fb22c7644d008757f78a4259e"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "develop"
|
||||
name = "github.com/status-im/keycard-go"
|
||||
packages = ["derivationpath"]
|
||||
revision = "d95853db0f480b9d6379009500acf44b21dc0be6"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/steakknife/bloomfilter"
|
||||
packages = ["."]
|
||||
revision = "99ee86d9200fcc2ffde62f508329bd6627c0a307"
|
||||
version = "1.0.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/steakknife/hamming"
|
||||
packages = ["."]
|
||||
revision = "003c143a81c25ea5e263d692919c611c7122ae6b"
|
||||
version = "0.2.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ce5194e5afac308cc34e500cab45b4ce88a0742d689e3cf7e37b607ad76bed2f"
|
||||
name = "github.com/syndtr/goleveldb"
|
||||
packages = [
|
||||
"leveldb",
|
||||
@ -428,59 +443,68 @@
|
||||
"leveldb/opt",
|
||||
"leveldb/storage",
|
||||
"leveldb/table",
|
||||
"leveldb/util",
|
||||
"leveldb/util"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ae2bd5eed72d46b28834ec3f60db3a3ebedd8dbd"
|
||||
revision = "9d007e481048296f09f59bd19bb7ae584563cd95"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tyler-smith/go-bip39"
|
||||
packages = [
|
||||
".",
|
||||
"wordlists"
|
||||
]
|
||||
revision = "2af0a847066a4f2669040ccd44a79c8eca10806a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/wsddn/go-ecdh"
|
||||
packages = ["."]
|
||||
revision = "48726bab92085232373de4ec5c51ce7b441c63a0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:59b49c47c11a48f1054529207f65907c014ecf5f9a7c0d9c0f1616dec7b062ed"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"curve25519",
|
||||
"pbkdf2",
|
||||
"ripemd160",
|
||||
"scrypt",
|
||||
"sha3",
|
||||
"ssh/terminal",
|
||||
"ssh/terminal"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
|
||||
revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fbdbb6cf8db3278412c9425ad78b26bb8eb788181f26a3ffb3e4f216b314f86a"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"html",
|
||||
"html/atom",
|
||||
"html/charset",
|
||||
"websocket",
|
||||
"websocket"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
|
||||
revision = "da137c7871d730100384dbcf36e6f8fa493aef5b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = ""
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:70d519d5cddeb60ceda2db88c24c340b1b2d7efb25ab54bacb38f57ea1998df7"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"cpu",
|
||||
"unix",
|
||||
"windows",
|
||||
"windows"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "d641721ec2dead6fe5ca284096fe4b1fcd49e427"
|
||||
revision = "fae7ac547cb717d141c433a2a173315e216b64c4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"encoding",
|
||||
@ -494,6 +518,8 @@
|
||||
"encoding/traditionalchinese",
|
||||
"encoding/unicode",
|
||||
"internal/gen",
|
||||
"internal/language",
|
||||
"internal/language/compact",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
@ -502,79 +528,39 @@
|
||||
"runes",
|
||||
"transform",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/norm"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
|
||||
version = "v0.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
|
||||
name = "gopkg.in/fsnotify.v1"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
source = "gopkg.in/fsnotify/fsnotify.v1"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:4f830ee018eb8c56d0def653ad7c9a1d2a053f0cef2ac6b2200f73b98fa6a681"
|
||||
name = "gopkg.in/natefinch/npipe.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6"
|
||||
|
||||
[[projects]]
|
||||
branch = "v1"
|
||||
digest = "1:a96d16bd088460f2e0685d46c39bcf1208ba46e0a977be2df49864ec7da447dd"
|
||||
name = "gopkg.in/tomb.v1"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
|
||||
version = "v2.2.2"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/dave/jennifer/jen",
|
||||
"github.com/ethereum/go-ethereum",
|
||||
"github.com/ethereum/go-ethereum/accounts/abi",
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind",
|
||||
"github.com/ethereum/go-ethereum/common",
|
||||
"github.com/ethereum/go-ethereum/common/hexutil",
|
||||
"github.com/ethereum/go-ethereum/core/rawdb",
|
||||
"github.com/ethereum/go-ethereum/core/types",
|
||||
"github.com/ethereum/go-ethereum/crypto",
|
||||
"github.com/ethereum/go-ethereum/ethclient",
|
||||
"github.com/ethereum/go-ethereum/ethdb",
|
||||
"github.com/ethereum/go-ethereum/p2p",
|
||||
"github.com/ethereum/go-ethereum/p2p/discv5",
|
||||
"github.com/ethereum/go-ethereum/rlp",
|
||||
"github.com/ethereum/go-ethereum/rpc",
|
||||
"github.com/hashicorp/golang-lru",
|
||||
"github.com/hpcloud/tail",
|
||||
"github.com/jmoiron/sqlx",
|
||||
"github.com/lib/pq",
|
||||
"github.com/mitchellh/go-homedir",
|
||||
"github.com/onsi/ginkgo",
|
||||
"github.com/onsi/gomega",
|
||||
"github.com/onsi/gomega/ghttp",
|
||||
"github.com/pressly/goose",
|
||||
"github.com/sirupsen/logrus",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/spf13/viper",
|
||||
"golang.org/x/net/context",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"gopkg.in/tomb.v1",
|
||||
]
|
||||
inputs-digest = "7c2260ca67851d579e74e84e128e6934b3a8bb4bb4f450a5ab9a42f31c20e91d"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
@ -29,6 +29,10 @@
|
||||
name = "github.com/pressly/sup"
|
||||
version = "0.5.3"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "1.3.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/onsi/ginkgo"
|
||||
version = "1.4.0"
|
||||
@ -51,4 +55,4 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/ethereum/go-ethereum"
|
||||
revision = "cd79bc61a983d6482579d12cdd239b37bbfa12ef"
|
||||
version = "1.9.0"
|
@@ -220,7 +220,7 @@ var _ = Describe("Parser", func() {
 			Expect(err).ToNot(HaveOccurred())
 
 			selectMethods := p.GetMethods([]string{})
-			Expect(len(selectMethods)).To(Equal(22))
+			Expect(len(selectMethods)).To(Equal(25))
 		})
 	})
 })
@@ -29,7 +29,7 @@ type Block struct {
 	Number       int64  `db:"number"`
 	ParentHash   string `db:"parent_hash"`
 	Size         string `db:"size"`
-	Time         int64  `db:"time"`
+	Time         uint64 `db:"time"`
 	Transactions []TransactionModel
 	UncleHash    string `db:"uncle_hash"`
 	UnclesReward string `db:"uncles_reward"`
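The column type change follows go-ethereum 1.9, where `Header.Time` and `Block.Time()` became plain `uint64` values instead of `*big.Int`. A minimal sketch, not part of this commit, of reading the new field through `ethclient` (the endpoint URL is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	header, err := client.HeaderByNumber(context.Background(), nil) // nil = latest header
	if err != nil {
		log.Fatal(err)
	}
	ts := header.Time // uint64 in geth >= 1.9; was *big.Int before
	fmt.Println("block time:", time.Unix(int64(ts), 0).UTC())
}
```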
@@ -42,7 +42,7 @@ type POAHeader struct {
 	Number   *hexutil.Big   `json:"number" gencodec:"required"`
 	GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
 	GasUsed  hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
-	Time     *hexutil.Big   `json:"timestamp" gencodec:"required"`
+	Time     hexutil.Uint64 `json:"timestamp" gencodec:"required"`
 	Extra    hexutil.Bytes  `json:"extraData" gencodec:"required"`
 	Hash     common.Hash    `json:"hash"`
 }
@@ -21,6 +21,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/params"
 )
 
 type Reader interface {
@@ -48,7 +49,7 @@ func (ldbr *LevelDatabaseReader) GetBlockNumber(hash common.Hash) *uint64 {
 }
 
 func (ldbr *LevelDatabaseReader) GetBlockReceipts(hash common.Hash, number uint64) types.Receipts {
-	return rawdb.ReadReceipts(ldbr.reader, hash, number)
+	return rawdb.ReadReceipts(ldbr.reader, hash, number, &params.ChainConfig{})
 }
 
 func (ldbr *LevelDatabaseReader) GetCanonicalHash(number uint64) common.Hash {
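Since go-ethereum 1.9, `rawdb.ReadReceipts` takes a `*params.ChainConfig` so it can derive the computed fields on stored receipts; the change above satisfies the new signature with an empty config. A minimal sketch under the assumption that the real chain config is available (the package and helper names are illustrative only):

```go
package leveldbreader // illustrative package name

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

// readMainnetReceipts passes the mainnet chain config so ReadReceipts can
// derive the signer-dependent receipt fields instead of leaving them zeroed.
func readMainnetReceipts(db ethdb.Database, hash common.Hash, number uint64) types.Receipts {
	return rawdb.ReadReceipts(db, hash, number, params.MainnetChainConfig)
}
```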
@@ -80,7 +80,7 @@ var _ = Describe("Saving blocks", func() {
 		blockNonce := "0x881db2ca900682e9a9"
 		miner := "x123"
 		extraData := "xextraData"
-		blockTime := int64(1508981640)
+		blockTime := uint64(1508981640)
 		uncleHash := "x789"
 		blockSize := string("1000")
 		difficulty := int64(10)
@@ -98,7 +98,7 @@ var _ = Describe("Saving blocks", func() {
 			Number:       blockNumber,
 			ParentHash:   blockParentHash,
 			Size:         blockSize,
-			Time:         blockTime,
+			Time:         uint64(blockTime),
 			UncleHash:    uncleHash,
 			UnclesReward: unclesReward,
 		}
@@ -162,7 +162,7 @@ func (blockChain *BlockChain) getPOAHeader(blockNumber int64) (header core.Heade
 		Number:   POAHeader.Number.ToInt(),
 		GasLimit: uint64(POAHeader.GasLimit),
 		GasUsed:  uint64(POAHeader.GasUsed),
-		Time:     POAHeader.Time.ToInt(),
+		Time:     uint64(POAHeader.Time),
 		Extra:    POAHeader.Extra,
 	}, POAHeader.Hash.String()), nil
 }
@@ -211,7 +211,7 @@ func (blockChain *BlockChain) getPOAHeaders(blockNumbers []int64) (headers []cor
 		Number:   POAHeader.Number.ToInt(),
 		GasLimit: uint64(POAHeader.GasLimit),
 		GasUsed:  uint64(POAHeader.GasUsed),
-		Time:     POAHeader.Time.ToInt(),
+		Time:     uint64(POAHeader.Time),
 		Extra:    POAHeader.Extra,
 	}, POAHeader.Hash.String())
 
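With the POA timestamp decoded as `hexutil.Uint64` rather than `*hexutil.Big`, turning the JSON-RPC field into a plain `uint64` is a single cast instead of a `ToInt()` call. A minimal sketch of that decode path; the struct and JSON payload below are illustrative and not taken from this repository:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// poaHeader is a cut-down stand-in for the POAHeader type changed above.
type poaHeader struct {
	Time hexutil.Uint64 `json:"timestamp"`
}

func main() {
	raw := []byte(`{"timestamp":"0x5d3007e0"}`)
	var h poaHeader
	if err := json.Unmarshal(raw, &h); err != nil {
		panic(err)
	}
	fmt.Println(uint64(h.Time)) // plain uint64 timestamp, ready for core.Header.Time
}
```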
@@ -19,6 +19,7 @@ package common
 import (
 	"encoding/json"
 	"math/big"
+	"strconv"
 	"strings"
 
 	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -52,7 +53,7 @@ func (bc BlockConverter) ToCoreBlock(gethBlock *types.Block) (core.Block, error)
 		Number:       gethBlock.Number().Int64(),
 		ParentHash:   gethBlock.ParentHash().Hex(),
 		Size:         gethBlock.Size().String(),
-		Time:         gethBlock.Time().Int64(),
+		Time:         gethBlock.Time(),
 		Transactions: transactions,
 		UncleHash:    gethBlock.UncleHash().Hex(),
 	}
@@ -81,7 +82,7 @@ func (bc BlockConverter) ToCoreUncle(block core.Block, uncles []*types.Header) (
 			Hash:      uncle.Hash().Hex(),
 			Raw:       raw,
 			Reward:    thisUncleReward.String(),
-			Timestamp: uncle.Time.String(),
+			Timestamp: strconv.FormatUint(uncle.Time, 10),
 		}
 		coreUncles = append(coreUncles, coreUncle)
 		totalUncleRewards.Add(totalUncleRewards, thisUncleReward)
@@ -44,7 +44,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
 		extraData, _ := hexutil.Decode("0xe4b883e5bda9e7a59ee4bb99e9b1bc")
 		nonce := types.BlockNonce{10}
 		number := int64(1)
-		time := int64(140000000)
+		time := uint64(140000000)
 
 		header := types.Header{
 			Difficulty: difficulty,
@@ -55,7 +55,7 @@ var _ = Describe("Conversion of GethBlock to core.Block", func() {
 			Nonce:      nonce,
 			Number:     big.NewInt(number),
 			ParentHash: common.Hash{64},
-			Time:       big.NewInt(time),
+			Time:       time,
 			UncleHash:  common.Hash{128},
 		}
 		block := types.NewBlock(&header, []*types.Transaction{}, []*types.Header{}, []*types.Receipt{})
@@ -18,6 +18,7 @@ package common
 
 import (
 	"encoding/json"
+	"strconv"
 
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/vulcanize/vulcanizedb/pkg/core"
@@ -34,7 +35,7 @@ func (converter HeaderConverter) Convert(gethHeader *types.Header, blockHash str
 		Hash:        blockHash,
 		BlockNumber: gethHeader.Number.Int64(),
 		Raw:         rawHeader,
-		Timestamp:   gethHeader.Time.String(),
+		Timestamp:   strconv.FormatUint(gethHeader.Time, 10),
 	}
 	return coreHeader
 }
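The same pattern applies anywhere a header timestamp is rendered as a string: `gethHeader.Time` is a `uint64` in geth 1.9, so `strconv.FormatUint` replaces the old `big.Int.String()` call. A minimal sketch with an arbitrary timestamp value:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	header := types.Header{Time: 1508981640} // uint64 since geth 1.9; was *big.Int before
	// Decimal string form, as stored in the core header model.
	fmt.Println(strconv.FormatUint(header.Time, 10))
}
```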
@@ -19,6 +19,7 @@ package common_test
 import (
 	"encoding/json"
 	"math/big"
+	"strconv"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -37,7 +38,7 @@ var _ = Describe("Block header converter", func() {
 		ParentHash:  common.HexToHash("0xParent"),
 		ReceiptHash: common.HexToHash("0xReceipt"),
 		Root:        common.HexToHash("0xRoot"),
-		Time:        big.NewInt(123456789),
+		Time:        uint64(123456789),
 		TxHash:      common.HexToHash("0xTransaction"),
 		UncleHash:   common.HexToHash("0xUncle"),
 	}
@@ -48,7 +49,7 @@ var _ = Describe("Block header converter", func() {
 
 		Expect(coreHeader.BlockNumber).To(Equal(gethHeader.Number.Int64()))
 		Expect(coreHeader.Hash).To(Equal(hash))
-		Expect(coreHeader.Timestamp).To(Equal(gethHeader.Time.String()))
+		Expect(coreHeader.Timestamp).To(Equal(strconv.FormatUint(gethHeader.Time, 10)))
 	})
 
 	It("includes raw bytes for header as JSON", func() {
5 vendor/github.com/allegro/bigcache/.gitignore generated vendored
@@ -2,4 +2,9 @@
 .DS_Store
 /server/server.exe
 /server/server
+/server/server_dar*
+/server/server_fre*
+/server/server_win*
+/server/server_net*
+/server/server_ope*
 CHANGELOG.md
2 vendor/github.com/allegro/bigcache/.travis.yml generated vendored
@@ -14,7 +14,7 @@ before_install:
 - go get github.com/mattn/goveralls
 - go get golang.org/x/tools/cmd/cover
 - go get golang.org/x/tools/cmd/goimports
-- go get github.com/golang/lint/golint
+- go get golang.org/x/lint/golint
 - go get github.com/stretchr/testify/assert
 - go get github.com/gordonklaus/ineffassign
 
39 vendor/github.com/allegro/bigcache/README.md generated vendored
@ -46,10 +46,15 @@ config := bigcache.Config {
|
||||
// if value is reached then the oldest entries can be overridden for the new ones
|
||||
// 0 value means no size limit
|
||||
HardMaxCacheSize: 8192,
|
||||
// callback fired when the oldest entry is removed because of its
|
||||
// expiration time or no space left for the new entry. Default value is nil which
|
||||
// means no callback and it prevents from unwrapping the oldest entry.
|
||||
// callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
OnRemove: nil,
|
||||
// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
|
||||
// for the new entry, or because delete was called. A constant representing the reason will be passed through.
|
||||
// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
|
||||
// Ignored if OnRemove is specified.
|
||||
OnRemoveWithReason: nil,
|
||||
}
|
||||
|
||||
cache, initErr := bigcache.NewBigCache(config)
|
||||
@ -74,20 +79,20 @@ Benchmark tests were made using an i7-6700K with 32GB of RAM on Windows 10.
|
||||
```bash
|
||||
cd caches_bench; go test -bench=. -benchtime=10s ./... -timeout 30m
|
||||
|
||||
BenchmarkMapSet-8 2000000 716 ns/op 336 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSet-8 1000000 1292 ns/op 347 B/op 8 allocs/op
|
||||
BenchmarkFreeCacheSet-8 3000000 501 ns/op 371 B/op 3 allocs/op
|
||||
BenchmarkBigCacheSet-8 3000000 482 ns/op 303 B/op 2 allocs/op
|
||||
BenchmarkMapGet-8 5000000 309 ns/op 24 B/op 1 allocs/op
|
||||
BenchmarkConcurrentMapGet-8 2000000 659 ns/op 24 B/op 2 allocs/op
|
||||
BenchmarkFreeCacheGet-8 3000000 541 ns/op 152 B/op 3 allocs/op
|
||||
BenchmarkBigCacheGet-8 3000000 420 ns/op 152 B/op 3 allocs/op
|
||||
BenchmarkBigCacheSetParallel-8 10000000 184 ns/op 313 B/op 3 allocs/op
|
||||
BenchmarkFreeCacheSetParallel-8 10000000 195 ns/op 357 B/op 4 allocs/op
|
||||
BenchmarkConcurrentMapSetParallel-8 5000000 242 ns/op 200 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGetParallel-8 20000000 100 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkFreeCacheGetParallel-8 10000000 133 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkConcurrentMapGetParallel-8 10000000 202 ns/op 24 B/op 2 allocs/op
|
||||
BenchmarkMapSet-8 3000000 569 ns/op 202 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSet-8 1000000 1592 ns/op 347 B/op 8 allocs/op
|
||||
BenchmarkFreeCacheSet-8 3000000 775 ns/op 355 B/op 2 allocs/op
|
||||
BenchmarkBigCacheSet-8 3000000 640 ns/op 303 B/op 2 allocs/op
|
||||
BenchmarkMapGet-8 5000000 407 ns/op 24 B/op 1 allocs/op
|
||||
BenchmarkConcurrentMapGet-8 3000000 558 ns/op 24 B/op 2 allocs/op
|
||||
BenchmarkFreeCacheGet-8 2000000 682 ns/op 136 B/op 2 allocs/op
|
||||
BenchmarkBigCacheGet-8 3000000 512 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkBigCacheSetParallel-8 10000000 225 ns/op 313 B/op 3 allocs/op
|
||||
BenchmarkFreeCacheSetParallel-8 10000000 218 ns/op 341 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapSetParallel-8 5000000 318 ns/op 200 B/op 6 allocs/op
|
||||
BenchmarkBigCacheGetParallel-8 20000000 178 ns/op 152 B/op 4 allocs/op
|
||||
BenchmarkFreeCacheGetParallel-8 20000000 295 ns/op 136 B/op 3 allocs/op
|
||||
BenchmarkConcurrentMapGetParallel-8 10000000 237 ns/op 24 B/op 2 allocs/op
|
||||
```
|
||||
|
||||
Writes and reads in bigcache are faster than in freecache.
|
||||
|
71 vendor/github.com/allegro/bigcache/bigcache.go generated vendored
@ -10,7 +10,7 @@ const (
|
||||
)
|
||||
|
||||
// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
|
||||
// It keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place,
|
||||
// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays,
|
||||
// therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||
type BigCache struct {
|
||||
shards []*cacheShard
|
||||
@ -20,8 +20,22 @@ type BigCache struct {
|
||||
config Config
|
||||
shardMask uint64
|
||||
maxShardSize uint32
|
||||
close chan struct{}
|
||||
}
|
||||
|
||||
// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
|
||||
type RemoveReason uint32
|
||||
|
||||
const (
|
||||
// Expired means the key is past its LifeWindow.
|
||||
Expired RemoveReason = iota
|
||||
// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
|
||||
// entry exceeded the maximum shard size.
|
||||
NoSpace
|
||||
// Deleted means Delete was called and this key was removed as a result.
|
||||
Deleted
|
||||
)
|
||||
|
||||
// NewBigCache initialize new instance of BigCache
|
||||
func NewBigCache(config Config) (*BigCache, error) {
|
||||
return newBigCache(config, &systemClock{})
|
||||
@ -45,13 +59,16 @@ func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||
config: config,
|
||||
shardMask: uint64(config.Shards - 1),
|
||||
maxShardSize: uint32(config.maximumShardSize()),
|
||||
close: make(chan struct{}),
|
||||
}
|
||||
|
||||
var onRemove func(wrappedEntry []byte)
|
||||
if config.OnRemove == nil {
|
||||
onRemove = cache.notProvidedOnRemove
|
||||
} else {
|
||||
var onRemove func(wrappedEntry []byte, reason RemoveReason)
|
||||
if config.OnRemove != nil {
|
||||
onRemove = cache.providedOnRemove
|
||||
} else if config.OnRemoveWithReason != nil {
|
||||
onRemove = cache.providedOnRemoveWithReason
|
||||
} else {
|
||||
onRemove = cache.notProvidedOnRemove
|
||||
}
|
||||
|
||||
for i := 0; i < config.Shards; i++ {
|
||||
@ -60,8 +77,15 @@ func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||
|
||||
if config.CleanWindow > 0 {
|
||||
go func() {
|
||||
for t := range time.Tick(config.CleanWindow) {
|
||||
cache.cleanUp(uint64(t.Unix()))
|
||||
ticker := time.NewTicker(config.CleanWindow)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case t := <-ticker.C:
|
||||
cache.cleanUp(uint64(t.Unix()))
|
||||
case <-cache.close:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -69,8 +93,16 @@ func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
// Close is used to signal a shutdown of the cache when you are done with it.
|
||||
// This allows the cleaning goroutines to exit and ensures references are not
|
||||
// kept to the cache preventing GC of the entire cache.
|
||||
func (c *BigCache) Close() error {
|
||||
close(c.close)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get reads entry for the key.
|
||||
// It returns an EntryNotFoundError when
|
||||
// It returns an ErrEntryNotFound when
|
||||
// no entry exists for the given key.
|
||||
func (c *BigCache) Get(key string) ([]byte, error) {
|
||||
hashedKey := c.hash.Sum64(key)
|
||||
@ -109,6 +141,15 @@ func (c *BigCache) Len() int {
|
||||
return len
|
||||
}
|
||||
|
||||
// Capacity returns amount of bytes store in the cache.
|
||||
func (c *BigCache) Capacity() int {
|
||||
var len int
|
||||
for _, shard := range c.shards {
|
||||
len += shard.capacity()
|
||||
}
|
||||
return len
|
||||
}
|
||||
|
||||
// Stats returns cache's statistics
|
||||
func (c *BigCache) Stats() Stats {
|
||||
var s Stats
|
||||
@ -128,10 +169,10 @@ func (c *BigCache) Iterator() *EntryInfoIterator {
|
||||
return newIterator(c)
|
||||
}
|
||||
|
||||
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
|
||||
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > c.lifeWindow {
|
||||
evict()
|
||||
evict(Expired)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@ -147,9 +188,15 @@ func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
|
||||
return c.shards[hashedKey&c.shardMask]
|
||||
}
|
||||
|
||||
func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
|
||||
func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
|
||||
}
|
||||
|
||||
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
|
||||
func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
|
||||
if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
|
||||
c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||
}
|
||||
|
26 vendor/github.com/allegro/bigcache/bigcache_bench_test.go generated vendored
@ -98,6 +98,14 @@ func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
|
||||
writeToCache(b, 1024, 100*time.Second, 100)
|
||||
}
|
||||
|
||||
func BenchmarkReadFromCacheNonExistentKeys(b *testing.B) {
|
||||
for _, shards := range []int{1, 512, 1024, 8192} {
|
||||
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
|
||||
readFromCacheNonExistentKeys(b, 1024)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: shards,
|
||||
@ -139,3 +147,21 @@ func readFromCache(b *testing.B, shards int) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func readFromCacheNonExistentKeys(b *testing.B, shards int) {
|
||||
cache, _ := NewBigCache(Config{
|
||||
Shards: shards,
|
||||
LifeWindow: 1000 * time.Second,
|
||||
MaxEntriesInWindow: max(b.N, 100),
|
||||
MaxEntrySize: 500,
|
||||
})
|
||||
b.ResetTimer()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
b.ReportAllocs()
|
||||
|
||||
for pb.Next() {
|
||||
cache.Get(strconv.Itoa(rand.Intn(b.N)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
171 vendor/github.com/allegro/bigcache/bigcache_test.go generated vendored
@ -1,7 +1,10 @@
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@ -105,7 +108,7 @@ func TestEntryNotFound(t *testing.T) {
|
||||
_, err := cache.Get("nonExistingKey")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"nonExistingKey\" not found")
|
||||
assert.EqualError(t, err, ErrEntryNotFound.Error())
|
||||
}
|
||||
|
||||
func TestTimingEviction(t *testing.T) {
|
||||
@ -127,7 +130,7 @@ func TestTimingEviction(t *testing.T) {
|
||||
_, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"key\" not found")
|
||||
assert.EqualError(t, err, ErrEntryNotFound.Error())
|
||||
}
|
||||
|
||||
func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
|
||||
@ -149,7 +152,7 @@ func TestTimingEvictionShouldEvictOnlyFromUpdatedShard(t *testing.T) {
|
||||
value, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.NoError(t, err, "Entry \"key\" not found")
|
||||
assert.NoError(t, err, ErrEntryNotFound.Error())
|
||||
assert.Equal(t, []byte("value"), value)
|
||||
}
|
||||
|
||||
@ -171,7 +174,7 @@ func TestCleanShouldEvictAll(t *testing.T) {
|
||||
value, err := cache.Get("key")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, err, "Entry \"key\" not found")
|
||||
assert.EqualError(t, err, ErrEntryNotFound.Error())
|
||||
assert.Equal(t, value, []byte(nil))
|
||||
}
|
||||
|
||||
@ -181,17 +184,22 @@ func TestOnRemoveCallback(t *testing.T) {
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
onRemoveInvoked := false
|
||||
onRemoveExtInvoked := false
|
||||
onRemove := func(key string, entry []byte) {
|
||||
onRemoveInvoked = true
|
||||
assert.Equal(t, "key", key)
|
||||
assert.Equal(t, []byte("value"), entry)
|
||||
}
|
||||
onRemoveExt := func(key string, entry []byte, reason RemoveReason) {
|
||||
onRemoveExtInvoked = true
|
||||
}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
OnRemove: onRemove,
|
||||
OnRemoveWithReason: onRemoveExt,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
@ -199,6 +207,70 @@ func TestOnRemoveCallback(t *testing.T) {
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value2"))
|
||||
|
||||
// then
|
||||
assert.True(t, onRemoveInvoked)
|
||||
assert.False(t, onRemoveExtInvoked)
|
||||
}
|
||||
|
||||
func TestOnRemoveWithReasonCallback(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
onRemoveInvoked := false
|
||||
onRemove := func(key string, entry []byte, reason RemoveReason) {
|
||||
onRemoveInvoked = true
|
||||
assert.Equal(t, "key", key)
|
||||
assert.Equal(t, []byte("value"), entry)
|
||||
assert.Equal(t, reason, RemoveReason(Expired))
|
||||
}
|
||||
cache, _ := newBigCache(Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
OnRemoveWithReason: onRemove,
|
||||
}, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value2"))
|
||||
|
||||
// then
|
||||
assert.True(t, onRemoveInvoked)
|
||||
}
|
||||
|
||||
func TestOnRemoveFilter(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// given
|
||||
clock := mockedClock{value: 0}
|
||||
onRemoveInvoked := false
|
||||
onRemove := func(key string, entry []byte, reason RemoveReason) {
|
||||
onRemoveInvoked = true
|
||||
}
|
||||
c := Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
MaxEntriesInWindow: 1,
|
||||
MaxEntrySize: 256,
|
||||
OnRemoveWithReason: onRemove,
|
||||
}.OnRemoveFilterSet(Deleted, NoSpace)
|
||||
|
||||
cache, _ := newBigCache(c, &clock)
|
||||
|
||||
// when
|
||||
cache.Set("key", []byte("value"))
|
||||
clock.set(5)
|
||||
cache.Set("key2", []byte("value2"))
|
||||
|
||||
// then
|
||||
assert.False(t, onRemoveInvoked)
|
||||
|
||||
// and when
|
||||
cache.Delete("key2")
|
||||
|
||||
// then
|
||||
assert.True(t, onRemoveInvoked)
|
||||
}
|
||||
@ -276,7 +348,7 @@ func TestCacheDel(t *testing.T) {
|
||||
err := cache.Delete("nonExistingKey")
|
||||
|
||||
// then
|
||||
assert.Equal(t, err.Error(), "Entry \"nonExistingKey\" not found")
|
||||
assert.Equal(t, err.Error(), ErrEntryNotFound.Error())
|
||||
|
||||
// and when
|
||||
cache.Set("existingKey", nil)
|
||||
@ -288,6 +360,67 @@ func TestCacheDel(t *testing.T) {
|
||||
assert.Len(t, cachedValue, 0)
|
||||
}
|
||||
|
||||
// TestCacheDelRandomly does simultaneous deletes, puts and gets, to check for corruption errors.
|
||||
func TestCacheDelRandomly(t *testing.T) {
|
||||
t.Parallel()
|
||||
c := Config{
|
||||
Shards: 1,
|
||||
LifeWindow: time.Second,
|
||||
CleanWindow: 0,
|
||||
MaxEntriesInWindow: 10,
|
||||
MaxEntrySize: 10,
|
||||
Verbose: true,
|
||||
Hasher: newDefaultHasher(),
|
||||
HardMaxCacheSize: 1,
|
||||
Logger: DefaultLogger(),
|
||||
}
|
||||
//c.Hasher = hashStub(5)
|
||||
cache, _ := NewBigCache(c)
|
||||
var wg sync.WaitGroup
|
||||
var ntest = 800000
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
for i := 0; i < ntest; i++ {
|
||||
r := uint8(rand.Int())
|
||||
key := fmt.Sprintf("thekey%d", r)
|
||||
|
||||
cache.Delete(key)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
val := make([]byte, 1024)
|
||||
for i := 0; i < ntest; i++ {
|
||||
r := byte(rand.Int())
|
||||
key := fmt.Sprintf("thekey%d", r)
|
||||
|
||||
for j := 0; j < len(val); j++ {
|
||||
val[j] = r
|
||||
}
|
||||
cache.Set(key, val)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
val := make([]byte, 1024)
|
||||
for i := 0; i < ntest; i++ {
|
||||
r := byte(rand.Int())
|
||||
key := fmt.Sprintf("thekey%d", r)
|
||||
|
||||
for j := 0; j < len(val); j++ {
|
||||
val[j] = r
|
||||
}
|
||||
if got, err := cache.Get(key); err == nil && !bytes.Equal(got, val) {
|
||||
t.Errorf("got %s ->\n %x\n expected:\n %x\n ", key, got, val)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestCacheReset(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -369,7 +502,7 @@ func TestGetOnResetCache(t *testing.T) {
|
||||
// then
|
||||
value, err := cache.Get("key1")
|
||||
|
||||
assert.Equal(t, err.Error(), "Entry \"key1\" not found")
|
||||
assert.Equal(t, err.Error(), ErrEntryNotFound.Error())
|
||||
assert.Equal(t, value, []byte(nil))
|
||||
}
|
||||
|
||||
@ -419,8 +552,8 @@ func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
|
||||
entry3, _ := cache.Get("key3")
|
||||
|
||||
// then
|
||||
assert.EqualError(t, key1Err, "Entry \"key1\" not found")
|
||||
assert.EqualError(t, key2Err, "Entry \"key2\" not found")
|
||||
assert.EqualError(t, key1Err, ErrEntryNotFound.Error())
|
||||
assert.EqualError(t, key2Err, ErrEntryNotFound.Error())
|
||||
assert.Equal(t, blob('c', 1024*800), entry3)
|
||||
}
|
||||
|
||||
@ -548,6 +681,28 @@ func TestNilValueCaching(t *testing.T) {
|
||||
assert.Equal(t, []byte{}, cachedValue)
|
||||
}
|
||||
|
||||
func TestClosing(t *testing.T) {
|
||||
// given
|
||||
config := Config{
|
||||
CleanWindow: time.Minute,
|
||||
}
|
||||
startGR := runtime.NumGoroutine()
|
||||
|
||||
// when
|
||||
for i := 0; i < 100; i++ {
|
||||
cache, _ := NewBigCache(config)
|
||||
cache.Close()
|
||||
}
|
||||
|
||||
// wait till all goroutines are stopped.
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
// then
|
||||
endGR := runtime.NumGoroutine()
|
||||
assert.True(t, endGR >= startGR)
|
||||
assert.InDelta(t, endGR, startGR, 25)
|
||||
}
|
||||
|
||||
type mockedLogger struct {
|
||||
lastFormat string
|
||||
lastArgs []interface{}
|
||||
|
14 vendor/github.com/allegro/bigcache/bytes.go generated vendored Normal file
@ -0,0 +1,14 @@
|
||||
// +build !appengine
|
||||
|
||||
package bigcache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
|
||||
return *(*string)(unsafe.Pointer(&strHeader))
|
||||
}
|
7
vendor/github.com/allegro/bigcache/bytes_appengine.go
generated
vendored
Normal file
7
vendor/github.com/allegro/bigcache/bytes_appengine.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build appengine
|
||||
|
||||
package bigcache
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return string(b)
|
||||
}
|
2 vendor/github.com/allegro/bigcache/caches_bench/caches_bench_test.go generated vendored
@ -14,7 +14,7 @@ import (
|
||||
const maxEntrySize = 256
|
||||
|
||||
func BenchmarkMapSet(b *testing.B) {
|
||||
m := make(map[string][]byte)
|
||||
m := make(map[string][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m[key(i)] = value()
|
||||
}
|
||||
|
21 vendor/github.com/allegro/bigcache/config.go generated vendored
@@ -26,8 +26,16 @@ type Config struct {
 	// the oldest entries are overridden for the new ones.
 	HardMaxCacheSize int
 	// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
-	// for the new entry. Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
+	// for the new entry, or because delete was called.
+	// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
 	OnRemove func(key string, entry []byte)
+	// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
+	// for the new entry, or because delete was called. A constant representing the reason will be passed through.
+	// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
+	// Ignored if OnRemove is specified.
+	OnRemoveWithReason func(key string, entry []byte, reason RemoveReason)
+
+	onRemoveFilter int
 
 	// Logger is a logging interface and used in combination with `Verbose`
 	// Defaults to `DefaultLogger()`
@@ -65,3 +73,14 @@ func (c Config) maximumShardSize() int {
 
 	return maxShardSize
 }
+
+// OnRemoveFilterSet sets which remove reasons will trigger a call to OnRemoveWithReason.
+// Filtering out reasons prevents bigcache from unwrapping them, which saves cpu.
+func (c Config) OnRemoveFilterSet(reasons ...RemoveReason) Config {
+	c.onRemoveFilter = 0
+	for i := range reasons {
+		c.onRemoveFilter |= 1 << uint(reasons[i])
+	}
+
+	return c
+}
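A minimal usage sketch of the new callback and filter, relying only on the vendored bigcache API shown above (`OnRemoveWithReason`, `OnRemoveFilterSet`, and the `RemoveReason` constants); the shard count, windows, and sizes are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	// Only expirations and evictions for lack of space reach the callback;
	// removals caused by Delete are filtered out and never unwrapped.
	config := bigcache.Config{
		Shards:             16,
		LifeWindow:         time.Minute,
		MaxEntriesInWindow: 1000,
		MaxEntrySize:       256,
		OnRemoveWithReason: func(key string, entry []byte, reason bigcache.RemoveReason) {
			fmt.Printf("removed %q (reason %d, %d bytes)\n", key, reason, len(entry))
		},
	}.OnRemoveFilterSet(bigcache.Expired, bigcache.NoSpace)

	cache, err := bigcache.NewBigCache(config)
	if err != nil {
		panic(err)
	}
	_ = cache.Set("key", []byte("value"))
}
```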
8 vendor/github.com/allegro/bigcache/encoding.go generated vendored
@ -2,8 +2,6 @@ package bigcache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -55,12 +53,6 @@ func readKeyFromEntry(data []byte) string {
|
||||
return bytesToString(dst)
|
||||
}
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
|
||||
return *(*string)(unsafe.Pointer(&strHeader))
|
||||
}
|
||||
|
||||
func readHashFromEntry(data []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
|
||||
}
|
||||
|
17 vendor/github.com/allegro/bigcache/entry_not_found_error.go generated vendored
@@ -1,17 +1,6 @@
 package bigcache
 
-import "fmt"
+import "errors"
 
-// EntryNotFoundError is an error type struct which is returned when entry was not found for provided key
-type EntryNotFoundError struct {
-	message string
-}
-
-func notFound(key string) error {
-	return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
-}
-
-// Error returned when entry does not exist.
-func (e EntryNotFoundError) Error() string {
-	return e.message
-}
+// ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key
+var ErrEntryNotFound = errors.New("Entry not found")
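Callers that previously matched the formatted message of `EntryNotFoundError` can now compare against the exported sentinel. A minimal sketch, assuming the bigcache version vendored above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	if err != nil {
		panic(err)
	}
	// Sentinel comparison replaces matching the old per-key error string.
	if _, err := cache.Get("missing-key"); err == bigcache.ErrEntryNotFound {
		fmt.Println("cache miss")
	}
}
```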
9 vendor/github.com/allegro/bigcache/go.mod generated vendored Normal file
@@ -0,0 +1,9 @@
+module github.com/allegro/bigcache
+
+go 1.12
+
+require (
+	github.com/cespare/xxhash v1.1.0 // indirect
+	github.com/coocood/freecache v1.1.0
+	github.com/stretchr/testify v1.3.0
+)
13 vendor/github.com/allegro/bigcache/go.sum generated vendored Normal file
@ -0,0 +1,13 @@
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA=
|
||||
github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
32
vendor/github.com/allegro/bigcache/iterator_test.go
generated
vendored
@ -2,6 +2,9 @@ package bigcache

import (
    "fmt"
    "runtime"
    "strconv"
    "sync"
    "testing"
    "time"

@ -148,3 +151,32 @@ func TestEntriesIteratorInInvalidState(t *testing.T) {
    assert.Equal(t, ErrInvalidIteratorState, err)
    assert.Equal(t, "Iterator is in invalid state. Use SetNext() to move to next position", err.Error())
}

func TestEntriesIteratorParallelAdd(t *testing.T) {
    bc, err := NewBigCache(DefaultConfig(1 * time.Minute))
    if err != nil {
        panic(err)
    }

    wg := sync.WaitGroup{}
    wg.Add(1)
    go func() {
        for i := 0; i < 10000; i++ {
            err := bc.Set(strconv.Itoa(i), []byte("aaaaaaa"))
            if err != nil {
                panic(err)
            }

            runtime.Gosched()
        }
        wg.Done()
    }()

    for i := 0; i < 100; i++ {
        iter := bc.Iterator()
        for iter.SetNext() {
            _, _ = iter.Value()
        }
    }
    wg.Wait()
}
36
vendor/github.com/allegro/bigcache/queue/bytes_queue.go
generated
vendored
@ -16,6 +16,12 @@ const (
|
||||
minimumEmptyBlobSize = 32 + headerEntrySize
|
||||
)
|
||||
|
||||
var (
|
||||
errEmptyQueue = &queueError{"Empty queue"}
|
||||
errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."}
|
||||
errIndexOutOfBounds = &queueError{"Index out of range"}
|
||||
)
|
||||
|
||||
// BytesQueue is a non-thread safe queue type of fifo based on bytes array.
|
||||
// For every push operation index of entry is returned. It can be used to read the entry later
|
||||
type BytesQueue struct {
|
||||
@ -162,6 +168,11 @@ func (q *BytesQueue) Get(index int) ([]byte, error) {
|
||||
return data, err
|
||||
}
|
||||
|
||||
// CheckGet checks if an entry can be read from index
|
||||
func (q *BytesQueue) CheckGet(index int) error {
|
||||
return q.peekCheckErr(index)
|
||||
}
|
||||
|
||||
// Capacity returns number of allocated bytes for queue
|
||||
func (q *BytesQueue) Capacity() int {
|
||||
return q.capacity
|
||||
@ -177,18 +188,35 @@ func (e *queueError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
|
||||
// peekCheckErr is identical to peek, but does not actually return any data
|
||||
func (q *BytesQueue) peekCheckErr(index int) error {
|
||||
|
||||
if q.count == 0 {
|
||||
return nil, 0, &queueError{"Empty queue"}
|
||||
return errEmptyQueue
|
||||
}
|
||||
|
||||
if index <= 0 {
|
||||
return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
|
||||
return errInvalidIndex
|
||||
}
|
||||
|
||||
if index+headerEntrySize >= len(q.array) {
|
||||
return nil, 0, &queueError{"Index out of range"}
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
|
||||
|
||||
if q.count == 0 {
|
||||
return nil, 0, errEmptyQueue
|
||||
}
|
||||
|
||||
if index <= 0 {
|
||||
return nil, 0, errInvalidIndex
|
||||
}
|
||||
|
||||
if index+headerEntrySize >= len(q.array) {
|
||||
return nil, 0, errIndexOutOfBounds
|
||||
}
|
||||
|
||||
blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
|
||||
|
13
vendor/github.com/allegro/bigcache/queue/bytes_queue_test.go
generated
vendored
@ -50,16 +50,19 @@ func TestPeek(t *testing.T) {
|
||||
|
||||
// when
|
||||
read, err := queue.Peek()
|
||||
|
||||
err2 := queue.peekCheckErr(queue.head)
|
||||
// then
|
||||
assert.Equal(t, err, err2)
|
||||
assert.EqualError(t, err, "Empty queue")
|
||||
assert.Nil(t, read)
|
||||
|
||||
// when
|
||||
queue.Push(entry)
|
||||
read, err = queue.Peek()
|
||||
err2 = queue.peekCheckErr(queue.head)
|
||||
|
||||
// then
|
||||
assert.Equal(t, err, err2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pop(queue), read)
|
||||
assert.Equal(t, entry, read)
|
||||
@ -286,10 +289,12 @@ func TestGetEntryFromInvalidIndex(t *testing.T) {
|
||||
|
||||
// when
|
||||
result, err := queue.Get(0)
|
||||
err2 := queue.CheckGet(0)
|
||||
|
||||
// then
|
||||
assert.Equal(t, err, err2)
|
||||
assert.Nil(t, result)
|
||||
assert.EqualError(t, err, "Index must be grater than zero. Invalid index.")
|
||||
assert.EqualError(t, err, "Index must be greater than zero. Invalid index.")
|
||||
}
|
||||
|
||||
func TestGetEntryFromIndexOutOfRange(t *testing.T) {
|
||||
@ -301,8 +306,10 @@ func TestGetEntryFromIndexOutOfRange(t *testing.T) {
|
||||
|
||||
// when
|
||||
result, err := queue.Get(42)
|
||||
err2 := queue.CheckGet(42)
|
||||
|
||||
// then
|
||||
assert.Equal(t, err, err2)
|
||||
assert.Nil(t, result)
|
||||
assert.EqualError(t, err, "Index out of range")
|
||||
}
|
||||
@ -315,8 +322,10 @@ func TestGetEntryFromEmptyQueue(t *testing.T) {
|
||||
|
||||
// when
|
||||
result, err := queue.Get(1)
|
||||
err2 := queue.CheckGet(1)
|
||||
|
||||
// then
|
||||
assert.Equal(t, err, err2)
|
||||
assert.Nil(t, result)
|
||||
assert.EqualError(t, err, "Empty queue")
|
||||
}
|
||||
|
47
vendor/github.com/allegro/bigcache/server/middleware_test.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func emptyTestHandler() service {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceLoader(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/api/v1/stats", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
testHandlers := serviceLoader(cacheIndexHandler(), emptyTestHandler())
|
||||
testHandlers.ServeHTTP(rr, req)
|
||||
if status := rr.Code; status != http.StatusAccepted {
|
||||
t.Errorf("handlers not loading properly. want: 202, got: %d", rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestMetrics(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
logger := log.New(&b, "", log.LstdFlags)
|
||||
req, err := http.NewRequest("GET", "/api/v1/cache/empty", nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
rr := httptest.NewRecorder()
|
||||
testHandlers := serviceLoader(cacheIndexHandler(), requestMetrics(logger))
|
||||
testHandlers.ServeHTTP(rr, req)
|
||||
targetTestString := b.String()
|
||||
if len(targetTestString) == 0 {
|
||||
t.Errorf("we are not logging request length strings.")
|
||||
}
|
||||
t.Log(targetTestString)
|
||||
}
|
98
vendor/github.com/allegro/bigcache/server/server_test.go
generated
vendored
@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
@ -183,3 +184,100 @@ func TestGetStats(t *testing.T) {
|
||||
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatsIndex(t *testing.T) {
|
||||
t.Parallel()
|
||||
var testStats bigcache.Stats
|
||||
|
||||
getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/stats", nil)
|
||||
putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/stats", nil)
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// manually enter a key so there are some stats. get it so there's at least 1 hit.
|
||||
if err := cache.Set("incrementStats", []byte("123")); err != nil {
|
||||
t.Errorf("error setting cache value. error %s", err)
|
||||
}
|
||||
// it's okay if this fails, since we'll catch it downstream.
|
||||
if _, err := cache.Get("incrementStats"); err != nil {
|
||||
t.Errorf("can't find incrementStats. error: %s", err)
|
||||
}
|
||||
|
||||
testHandlers := statsIndexHandler()
|
||||
testHandlers.ServeHTTP(rr, getreq)
|
||||
resp := rr.Result()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&testStats); err != nil {
|
||||
t.Errorf("error decoding cache stats. error: %s", err)
|
||||
}
|
||||
|
||||
if testStats.Hits == 0 {
|
||||
t.Errorf("want: > 0; got: 0.\n\thandler not properly returning stats info.")
|
||||
}
|
||||
|
||||
testHandlers = statsIndexHandler()
|
||||
testHandlers.ServeHTTP(rr, putreq)
|
||||
resp = rr.Result()
|
||||
_, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Errorf("cannot deserialise test response: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCacheIndexHandler(t *testing.T) {
|
||||
getreq := httptest.NewRequest("GET", testBaseString+"/api/v1/cache/testkey", nil)
|
||||
putreq := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123")))
|
||||
delreq := httptest.NewRequest("DELETE", testBaseString+"/api/v1/cache/testkey", bytes.NewBuffer([]byte("123")))
|
||||
|
||||
getrr := httptest.NewRecorder()
|
||||
putrr := httptest.NewRecorder()
|
||||
delrr := httptest.NewRecorder()
|
||||
testHandlers := cacheIndexHandler()
|
||||
|
||||
testHandlers.ServeHTTP(putrr, putreq)
|
||||
resp := putrr.Result()
|
||||
if resp.StatusCode != 201 {
|
||||
t.Errorf("want: 201; got: %d.\n\tcan't put keys.", resp.StatusCode)
|
||||
}
|
||||
testHandlers.ServeHTTP(getrr, getreq)
|
||||
resp = getrr.Result()
|
||||
if resp.StatusCode != 200 {
|
||||
t.Errorf("want: 200; got: %d.\n\tcan't get keys.", resp.StatusCode)
|
||||
}
|
||||
testHandlers.ServeHTTP(delrr, delreq)
|
||||
resp = delrr.Result()
|
||||
if resp.StatusCode != 200 {
|
||||
t.Errorf("want: 200; got: %d.\n\tcan't delete keys.", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPutWhenExceedShardCap(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", bytes.NewBuffer(bytes.Repeat([]byte("a"), 8*1024*1024)))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
putCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 500 {
|
||||
t.Errorf("want: 500; got: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPutWhenReading(t *testing.T) {
|
||||
t.Parallel()
|
||||
req := httptest.NewRequest("PUT", testBaseString+"/api/v1/cache/putKey", errReader(0))
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
putCacheHandler(rr, req)
|
||||
resp := rr.Result()
|
||||
|
||||
if resp.StatusCode != 500 {
|
||||
t.Errorf("want: 500; got: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
type errReader int
|
||||
|
||||
func (errReader) Read([]byte) (int, error) {
|
||||
return 0, errors.New("test read error")
|
||||
}
|
||||
|
66
vendor/github.com/allegro/bigcache/shard.go
generated
vendored
@ -8,12 +8,14 @@ import (
|
||||
"github.com/allegro/bigcache/queue"
|
||||
)
|
||||
|
||||
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
|
||||
|
||||
type cacheShard struct {
|
||||
hashmap map[uint64]uint32
|
||||
entries queue.BytesQueue
|
||||
lock sync.RWMutex
|
||||
entryBuffer []byte
|
||||
onRemove func(wrappedEntry []byte)
|
||||
onRemove onRemoveCallback
|
||||
|
||||
isVerbose bool
|
||||
logger Logger
|
||||
@ -23,8 +25,6 @@ type cacheShard struct {
|
||||
stats Stats
|
||||
}
|
||||
|
||||
type onRemoveCallback func(wrappedEntry []byte)
|
||||
|
||||
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
@ -32,7 +32,7 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.miss()
|
||||
return nil, notFound(key)
|
||||
return nil, ErrEntryNotFound
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
@ -47,11 +47,12 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||
}
|
||||
s.lock.RUnlock()
|
||||
s.collision()
|
||||
return nil, notFound(key)
|
||||
return nil, ErrEntryNotFound
|
||||
}
|
||||
entry := readEntry(wrappedEntry)
|
||||
s.lock.RUnlock()
|
||||
s.hit()
|
||||
return readEntry(wrappedEntry), nil
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||
@ -77,7 +78,7 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
if s.removeOldestEntry() != nil {
|
||||
if s.removeOldestEntry(NoSpace) != nil {
|
||||
s.lock.Unlock()
|
||||
return fmt.Errorf("entry is bigger than max shard size")
|
||||
}
|
||||
@ -85,17 +86,17 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||
}
|
||||
|
||||
func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||
// Optimistic pre-check using only readlock
|
||||
s.lock.RLock()
|
||||
itemIndex := s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return notFound(key)
|
||||
return ErrEntryNotFound
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
if err := s.entries.CheckGet(int(itemIndex)); err != nil {
|
||||
s.lock.RUnlock()
|
||||
s.delmiss()
|
||||
return err
|
||||
@ -104,8 +105,25 @@ func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||
|
||||
s.lock.Lock()
|
||||
{
|
||||
// After obtaining the writelock, we need to read the same again,
|
||||
// since the data delivered earlier may be stale now
|
||||
itemIndex = s.hashmap[hashedKey]
|
||||
|
||||
if itemIndex == 0 {
|
||||
s.lock.Unlock()
|
||||
s.delmiss()
|
||||
return ErrEntryNotFound
|
||||
}
|
||||
|
||||
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||
if err != nil {
|
||||
s.lock.Unlock()
|
||||
s.delmiss()
|
||||
return err
|
||||
}
|
||||
|
||||
delete(s.hashmap, hashedKey)
|
||||
s.onRemove(wrappedEntry)
|
||||
s.onRemove(wrappedEntry, Deleted)
|
||||
resetKeyFromEntry(wrappedEntry)
|
||||
}
|
||||
s.lock.Unlock()
|
||||
@ -114,10 +132,10 @@ func (s *cacheShard) del(key string, hashedKey uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
|
||||
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||
if currentTimestamp-oldestTimestamp > s.lifeWindow {
|
||||
evict()
|
||||
evict(Expired)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@ -136,17 +154,22 @@ func (s *cacheShard) cleanUp(currentTimestamp uint64) {
|
||||
}
|
||||
|
||||
func (s *cacheShard) getOldestEntry() ([]byte, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
return s.entries.Peek()
|
||||
}
|
||||
|
||||
func (s *cacheShard) getEntry(index int) ([]byte, error) {
|
||||
return s.entries.Get(index)
|
||||
s.lock.RLock()
|
||||
entry, err := s.entries.Get(index)
|
||||
s.lock.RUnlock()
|
||||
|
||||
return entry, err
|
||||
}
|
||||
|
||||
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
|
||||
keys = make([]uint32, len(s.hashmap))
|
||||
|
||||
s.lock.RLock()
|
||||
keys = make([]uint32, len(s.hashmap))
|
||||
|
||||
for _, index := range s.hashmap {
|
||||
keys[next] = index
|
||||
@ -157,12 +180,12 @@ func (s *cacheShard) copyKeys() (keys []uint32, next int) {
|
||||
return keys, next
|
||||
}
|
||||
|
||||
func (s *cacheShard) removeOldestEntry() error {
|
||||
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
|
||||
oldest, err := s.entries.Pop()
|
||||
if err == nil {
|
||||
hash := readHashFromEntry(oldest)
|
||||
delete(s.hashmap, hash)
|
||||
s.onRemove(oldest)
|
||||
s.onRemove(oldest, reason)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
@ -183,6 +206,13 @@ func (s *cacheShard) len() int {
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *cacheShard) capacity() int {
|
||||
s.lock.RLock()
|
||||
res := s.entries.Capacity()
|
||||
s.lock.RUnlock()
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *cacheShard) getStats() Stats {
|
||||
var stats = Stats{
|
||||
Hits: atomic.LoadInt64(&s.stats.Hits),
|
||||
|
4
vendor/github.com/aristanetworks/goarista/.travis.yml
generated
vendored
@ -1,10 +1,10 @@
language: go
go:
  - 1.10.x
  - 1.11.x
  - 1.x
  - master
before_install:
  - go get -v github.com/golang/lint/golint
  - go get -v golang.org/x/lint/golint
  - go get -v -t -d ./...
after_success:
  - make coverdata
2
vendor/github.com/aristanetworks/goarista/cmd/gnmi/README.md
generated
vendored
@ -164,7 +164,7 @@ Example:

Configure the idle-timeout on SSH connections
```
gnmi [OPTIONS] update 'cli' 'management ssh
gnmi [OPTIONS] update 'origin=cli' "" 'management ssh
idle-timeout 300'
```

29
vendor/github.com/aristanetworks/goarista/cmd/gnmi/main.go
generated
vendored
@ -22,8 +22,8 @@ import (
|
||||
var help = `Usage of gnmi:
|
||||
gnmi -addr [<VRF-NAME>/]ADDRESS:PORT [options...]
|
||||
capabilities
|
||||
get PATH+
|
||||
subscribe PATH+
|
||||
get (origin=ORIGIN) PATH+
|
||||
subscribe (origin=ORIGIN) PATH+
|
||||
((update|replace (origin=ORIGIN) PATH JSON|FILE)|(delete (origin=ORIGIN) PATH))+
|
||||
`
|
||||
|
||||
@ -43,6 +43,8 @@ func main() {
|
||||
flag.StringVar(&cfg.KeyFile, "keyfile", "", "Path to client TLS private key file")
|
||||
flag.StringVar(&cfg.Password, "password", "", "Password to authenticate with")
|
||||
flag.StringVar(&cfg.Username, "username", "", "Username to authenticate with")
|
||||
flag.StringVar(&cfg.Compression, "compression", "gzip", "Compression method. "+
|
||||
`Supported options: "" and "gzip"`)
|
||||
flag.BoolVar(&cfg.TLS, "tls", false, "Enable TLS")
|
||||
|
||||
subscribeOptions := &gnmi.SubscribeOptions{}
|
||||
@ -103,7 +105,11 @@ func main() {
|
||||
if len(setOps) != 0 {
|
||||
usageAndExit("error: 'get' not allowed after 'merge|replace|delete'")
|
||||
}
|
||||
err := gnmi.Get(ctx, client, gnmi.SplitPaths(args[i+1:]))
|
||||
origin, ok := parseOrigin(args[i+1])
|
||||
if ok {
|
||||
i++
|
||||
}
|
||||
err := gnmi.Get(ctx, client, gnmi.SplitPaths(args[i+1:]), origin)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
@ -112,9 +118,14 @@ func main() {
|
||||
if len(setOps) != 0 {
|
||||
usageAndExit("error: 'subscribe' not allowed after 'merge|replace|delete'")
|
||||
}
|
||||
origin, ok := parseOrigin(args[i+1])
|
||||
if ok {
|
||||
i++
|
||||
}
|
||||
respChan := make(chan *pb.SubscribeResponse)
|
||||
errChan := make(chan error)
|
||||
defer close(errChan)
|
||||
subscribeOptions.Origin = origin
|
||||
subscribeOptions.Paths = gnmi.SplitPaths(args[i+1:])
|
||||
go gnmi.Subscribe(ctx, client, subscribeOptions, respChan, errChan)
|
||||
for {
|
||||
@ -138,8 +149,9 @@ func main() {
|
||||
Type: args[i],
|
||||
}
|
||||
i++
|
||||
if strings.HasPrefix(args[i], "origin=") {
|
||||
op.Origin = strings.TrimPrefix(args[i], "origin=")
|
||||
var ok bool
|
||||
op.Origin, ok = parseOrigin(args[i])
|
||||
if ok {
|
||||
i++
|
||||
}
|
||||
op.Path = gnmi.SplitPath(args[i])
|
||||
@ -164,3 +176,10 @@ func main() {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func parseOrigin(s string) (string, bool) {
|
||||
if strings.HasPrefix(s, "origin=") {
|
||||
return strings.TrimPrefix(s, "origin="), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
28
vendor/github.com/aristanetworks/goarista/cmd/json2test/main.go
generated
vendored
@ -17,6 +17,7 @@ import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
@ -27,7 +28,10 @@ import (
|
||||
var errTestFailure = errors.New("testfailure")
|
||||
|
||||
func main() {
|
||||
err := writeTestOutput(os.Stdin, os.Stdout)
|
||||
verbose := flag.Bool("v", false, "Verbose output. "+
|
||||
"By default only failed tests emit verbose output in test result summary.")
|
||||
flag.Parse()
|
||||
err := writeTestOutput(os.Stdin, os.Stdout, *verbose)
|
||||
if err == errTestFailure {
|
||||
os.Exit(1)
|
||||
} else if err != nil {
|
||||
@ -62,7 +66,7 @@ type testFailure struct {
|
||||
o outputBuffer
|
||||
}
|
||||
|
||||
func writeTestOutput(in io.Reader, out io.Writer) error {
|
||||
func writeTestOutput(in io.Reader, out io.Writer, verbose bool) error {
|
||||
testOutputBuffer := map[test]*outputBuffer{}
|
||||
var failures []testFailure
|
||||
d := json.NewDecoder(in)
|
||||
@ -81,9 +85,25 @@ func writeTestOutput(in io.Reader, out io.Writer) error {
|
||||
case "run":
|
||||
testOutputBuffer[test{pkg: e.Package, test: e.Test}] = new(outputBuffer)
|
||||
case "pass":
|
||||
if !verbose && e.Test == "" {
|
||||
// Match go test output:
|
||||
// ok foo/bar 2.109s
|
||||
fmt.Fprintf(buf, "ok \t%s\t%.3fs\n", e.Package, e.Elapsed)
|
||||
}
|
||||
// Don't hold onto text for passing
|
||||
delete(testOutputBuffer, test{pkg: e.Package, test: e.Test})
|
||||
case "fail":
|
||||
if !verbose {
|
||||
if e.Test != "" {
|
||||
// Match go test output:
|
||||
// --- FAIL: TestFooBar (0.00s)
|
||||
fmt.Fprintf(buf, "--- FAIL: %s (%.3f)\n", e.Test, e.Elapsed)
|
||||
} else {
|
||||
// Match go test output:
|
||||
// FAIL foo/bar 1.444s
|
||||
fmt.Fprintf(buf, "FAIL\t%s\t%.3fs\n", e.Package, e.Elapsed)
|
||||
}
|
||||
}
|
||||
// fail may be for a package, which won't have an entry in
|
||||
// testOutputBuffer because packages don't have a "run"
|
||||
// action.
|
||||
@ -94,7 +114,9 @@ func writeTestOutput(in io.Reader, out io.Writer) error {
|
||||
failures = append(failures, f)
|
||||
}
|
||||
case "output":
|
||||
buf.WriteString(e.Output)
|
||||
if verbose {
|
||||
buf.WriteString(e.Output)
|
||||
}
|
||||
// output may be for a package, which won't have an entry
|
||||
// in testOutputBuffer because packages don't have a "run"
|
||||
// action.
|
||||
|
63
vendor/github.com/aristanetworks/goarista/cmd/json2test/main_test.go
generated
vendored
@ -6,36 +6,53 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/kylelemons/godebug/diff"
|
||||
)
|
||||
|
||||
func TestWriteTestOutput(t *testing.T) {
|
||||
input, err := os.Open("testdata/input.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var out bytes.Buffer
|
||||
if err := writeTestOutput(input, &out); err != errTestFailure {
|
||||
t.Error("expected test failure")
|
||||
}
|
||||
for name, tc := range map[string]struct {
|
||||
verbose bool
|
||||
inputFile string
|
||||
goldFile string
|
||||
}{
|
||||
"quiet": {
|
||||
verbose: false,
|
||||
inputFile: "testdata/input.txt",
|
||||
goldFile: "testdata/gold-quiet.txt",
|
||||
},
|
||||
"verbose": {
|
||||
verbose: true,
|
||||
inputFile: "testdata/input.txt",
|
||||
goldFile: "testdata/gold-verbose.txt",
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
input, err := os.Open(tc.inputFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var out bytes.Buffer
|
||||
if err := writeTestOutput(input, &out, tc.verbose); err != errTestFailure {
|
||||
t.Error("expected test failure")
|
||||
}
|
||||
|
||||
gold, err := os.Open("testdata/gold.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected, err := ioutil.ReadAll(gold)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gold, err := os.Open(tc.goldFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expected, err := ioutil.ReadAll(gold)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(out.Bytes(), expected) {
|
||||
t.Error("output does not match gold.txt")
|
||||
fmt.Println("Expected:")
|
||||
fmt.Println(string(expected))
|
||||
fmt.Println("Got:")
|
||||
fmt.Println(out.String())
|
||||
if !bytes.Equal(out.Bytes(), expected) {
|
||||
t.Errorf("output does not match %s", tc.goldFile)
|
||||
t.Error("\n" + diff.Diff(string(expected), out.String()))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
12
vendor/github.com/aristanetworks/goarista/cmd/json2test/testdata/gold-quiet.txt
generated
vendored
Normal file
@ -0,0 +1,12 @@
ok pkg/passed 0.013s
--- FAIL: TestPanic (600.029)
--- FAIL: TestFail (0.180)
FAIL pkg/failed 0.204s

Test failures:
[1] pkg/panic.TestPanic
panic
FAIL pkg/panic 600.029s

[2] pkg/failed.TestFail
--- FAIL: TestFail (0.18s)
13
vendor/github.com/aristanetworks/goarista/cmd/ockafka/main.go
generated
vendored
@ -23,7 +23,8 @@ import (
|
||||
)
|
||||
|
||||
var keysFlag = flag.String("kafkakeys", "",
|
||||
"Keys for kafka messages (comma-separated, default: the value of -addrs")
|
||||
"Keys for kafka messages (comma-separated, default: the value of -addrs). The key '"+
|
||||
client.HostnameArg+"' is replaced by the current hostname.")
|
||||
|
||||
func newProducer(addresses []string, topic, key, dataset string) (producer.Producer, error) {
|
||||
encodedKey := sarama.StringEncoder(key)
|
||||
@ -38,10 +39,16 @@ func newProducer(addresses []string, topic, key, dataset string) (producer.Produ
|
||||
func main() {
|
||||
username, password, subscriptions, grpcAddrs, opts := client.ParseFlags()
|
||||
|
||||
var keys []string
|
||||
var err error
|
||||
if *keysFlag == "" {
|
||||
*keysFlag = strings.Join(grpcAddrs, ",")
|
||||
keys = grpcAddrs
|
||||
} else {
|
||||
keys, err = client.ParseHostnames(*keysFlag)
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
keys := strings.Split(*keysFlag, ",")
|
||||
if len(grpcAddrs) != len(keys) {
|
||||
glog.Fatal("Please provide the same number of addresses and Kafka keys")
|
||||
}
|
||||
|
2
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/README.md
generated
vendored
@ -15,7 +15,7 @@ metrics:
- name: tempSensor
  path: /Sysdb/environment/temperature/status/tempSensor/(?P<sensor>.+)/(?P<type>(?:maxT|t)emperature)/value
  help: Temperature and Maximum Temperature
...
# ...
```

Applied to an update for the path
72
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector.go
generated
vendored
@ -6,11 +6,14 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
@ -59,25 +62,29 @@ func (c *collector) update(addr string, message proto.Message) {
|
||||
}
|
||||
|
||||
device := strings.Split(addr, ":")[0]
|
||||
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
|
||||
prefix := gnmi.StrPath(notif.Prefix)
|
||||
// Process deletes first
|
||||
for _, del := range notif.Delete {
|
||||
path := prefix + "/" + strings.Join(del.Element, "/")
|
||||
path := prefix + gnmi.StrPath(del)
|
||||
key := source{addr: device, path: path}
|
||||
c.m.Lock()
|
||||
delete(c.metrics, key)
|
||||
if _, ok := c.metrics[key]; ok {
|
||||
delete(c.metrics, key)
|
||||
} else {
|
||||
// TODO: replace this with a prefix tree
|
||||
p := path + "/"
|
||||
for k := range c.metrics {
|
||||
if k.addr == device && strings.HasPrefix(k.path, p) {
|
||||
delete(c.metrics, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.m.Unlock()
|
||||
}
|
||||
|
||||
// Process updates next
|
||||
for _, update := range notif.Update {
|
||||
// We only use JSON encoded values
|
||||
if update.Value == nil || update.Value.Type != pb.Encoding_JSON {
|
||||
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
|
||||
continue
|
||||
}
|
||||
|
||||
path := prefix + "/" + strings.Join(update.Path.Element, "/")
|
||||
path := prefix + gnmi.StrPath(update.Path)
|
||||
value, suffix, ok := parseValue(update)
|
||||
if !ok {
|
||||
continue
|
||||
@ -125,7 +132,8 @@ func (c *collector) update(addr string, message proto.Message) {
|
||||
// Get the descriptor and labels for this source
|
||||
metric := c.config.getMetricValues(src)
|
||||
if metric == nil || metric.desc == nil {
|
||||
glog.V(8).Infof("Ignoring unmatched update at %s:%s: %+v", device, path, update.Value)
|
||||
glog.V(8).Infof("Ignoring unmatched update %v at %s:%s with value %+v",
|
||||
update, device, path, value)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -153,26 +161,50 @@ func (c *collector) update(addr string, message proto.Message) {
|
||||
}
|
||||
}
|
||||
|
||||
// ParseValue takes in an update and parses a value and suffix
|
||||
// parseValue takes in an update and parses a value and suffix
|
||||
// Returns an interface that contains either a string or a float64 as well as a suffix
|
||||
// Unparseable updates return (0, empty string, false)
|
||||
func parseValue(update *pb.Update) (interface{}, string, bool) {
|
||||
var intf interface{}
|
||||
if err := json.Unmarshal(update.Value.Value, &intf); err != nil {
|
||||
glog.Errorf("Can't parse value in update %v: %v", update, err)
|
||||
intf, err := gnmi.ExtractValue(update)
|
||||
if err != nil {
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
switch value := intf.(type) {
|
||||
case float64:
|
||||
return value, "", true
|
||||
// float64 or string expected as the return value
|
||||
case int64:
|
||||
return float64(value), "", true
|
||||
case uint64:
|
||||
return float64(value), "", true
|
||||
case float32:
|
||||
return float64(value), "", true
|
||||
case *pb.Decimal64:
|
||||
val := gnmi.DecimalToFloat(value)
|
||||
if math.IsInf(val, 0) || math.IsNaN(val) {
|
||||
return 0, "", false
|
||||
}
|
||||
return val, "", true
|
||||
case json.Number:
|
||||
valFloat, err := value.Float64()
|
||||
if err != nil {
|
||||
return value, "", true
|
||||
}
|
||||
return valFloat, "", true
|
||||
case *any.Any:
|
||||
return value.String(), "", true
|
||||
case []interface{}:
|
||||
// extract string represetation for now
|
||||
return gnmi.StrVal(update.Val), "", false
|
||||
case map[string]interface{}:
|
||||
if vIntf, ok := value["value"]; ok {
|
||||
if val, ok := vIntf.(float64); ok {
|
||||
return val, "value", true
|
||||
if num, ok := vIntf.(json.Number); ok {
|
||||
valFloat, err := num.Float64()
|
||||
if err != nil {
|
||||
return num, "value", true
|
||||
}
|
||||
return valFloat, "value", true
|
||||
}
|
||||
}
|
||||
// float64 or string expected as the return value
|
||||
case bool:
|
||||
if value {
|
||||
return float64(1), "", true
|
||||
|
209
vendor/github.com/aristanetworks/goarista/cmd/ocprometheus/collector_test.go
generated
vendored
@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@ -66,9 +67,9 @@ func makeMetrics(cfg *Config, expValues map[source]float64, notification *pb.Not
|
||||
}
|
||||
|
||||
func findUpdate(notif *pb.Notification, path string) (*pb.Update, error) {
|
||||
prefix := notif.Prefix.Element
|
||||
prefix := notif.Prefix
|
||||
for _, v := range notif.Update {
|
||||
fullPath := "/" + strings.Join(append(prefix, v.Path.Element...), "/")
|
||||
fullPath := gnmi.StrPath(gnmi.JoinPaths(prefix, v.Path))
|
||||
if strings.Contains(path, fullPath) || path == fullPath {
|
||||
return v, nil
|
||||
}
|
||||
@ -82,6 +83,15 @@ func makeResponse(notif *pb.Notification) *pb.SubscribeResponse {
|
||||
}
|
||||
}
|
||||
|
||||
func makePath(pathStr string) *pb.Path {
|
||||
splitPath := gnmi.SplitPath(pathStr)
|
||||
path, err := gnmi.ParseGNMIElements(splitPath)
|
||||
if err != nil {
|
||||
return &pb.Path{}
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
config := []byte(`
|
||||
devicelabels:
|
||||
@ -117,43 +127,30 @@ metrics:
|
||||
coll := newCollector(cfg)
|
||||
|
||||
notif := &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Prefix: makePath("Sysdb"),
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("42"),
|
||||
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("42")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "speed"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("{\"value\": 45}"),
|
||||
Path: makePath("environment/cooling/status/fan/speed"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("{\"value\": 45}")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"igmpsnooping", "vlanStatus", "2050", "ethGroup",
|
||||
"01:00:5e:01:01:01", "intf", "Cpu"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("true"),
|
||||
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "name"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"Fan1.1\""),
|
||||
Path: makePath("environment/cooling/status/fan/name"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"Fan1.1\"")},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -185,33 +182,24 @@ metrics:
|
||||
|
||||
// Update two values, and one path which is not a metric
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Prefix: makePath("Sysdb"),
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("52"),
|
||||
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("52")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "cooling", "status", "fan", "name"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"Fan2.1\""),
|
||||
Path: makePath("environment/cooling/status/fan/name"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"Fan2.1\"")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"environment", "doesntexist", "status"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("{\"value\": 45}"),
|
||||
Path: makePath("environment/doesntexist/status"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("{\"value\": 45}")},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -230,15 +218,12 @@ metrics:
|
||||
|
||||
// Same path, different device
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Prefix: makePath("Sysdb"),
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("42"),
|
||||
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("42")},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -254,12 +239,8 @@ metrics:
|
||||
|
||||
// Delete a path
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Delete: []*pb.Path{
|
||||
{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
},
|
||||
Prefix: makePath("Sysdb"),
|
||||
Delete: []*pb.Path{makePath("lag/intfCounterDir/Ethernet1/intfCounter")},
|
||||
}
|
||||
src.addr = "10.1.1.1"
|
||||
delete(expValues, src)
|
||||
@ -272,15 +253,12 @@ metrics:
|
||||
|
||||
// Non-numeric update to path without value label
|
||||
notif = &pb.Notification{
|
||||
Prefix: &pb.Path{Element: []string{"Sysdb"}},
|
||||
Prefix: makePath("Sysdb"),
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: &pb.Path{
|
||||
Element: []string{"lag", "intfCounterDir", "Ethernet1", "intfCounter"},
|
||||
},
|
||||
Value: &pb.Value{
|
||||
Type: pb.Encoding_JSON,
|
||||
Value: []byte("\"test\""),
|
||||
Path: makePath("lag/intfCounterDir/Ethernet1/intfCounter"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("\"test\"")},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -292,3 +270,100 @@ metrics:
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoalescedDelete(t *testing.T) {
|
||||
config := []byte(`
|
||||
devicelabels:
|
||||
10.1.1.1:
|
||||
lab1: val1
|
||||
lab2: val2
|
||||
'*':
|
||||
lab1: val3
|
||||
lab2: val4
|
||||
subscriptions:
|
||||
- /Sysdb/environment/cooling/status
|
||||
- /Sysdb/environment/power/status
|
||||
- /Sysdb/bridging/igmpsnooping/forwarding/forwarding/status
|
||||
metrics:
|
||||
- name: fanName
|
||||
path: /Sysdb/environment/cooling/status/fan/name
|
||||
help: Fan Name
|
||||
valuelabel: name
|
||||
defaultvalue: 2.5
|
||||
- name: intfCounter
|
||||
path: /Sysdb/(lag|slice/phy/.+)/intfCounterDir/(?P<intf>.+)/intfCounter
|
||||
help: Per-Interface Bytes/Errors/Discards Counters
|
||||
- name: fanSpeed
|
||||
path: /Sysdb/environment/cooling/status/fan/speed/value
|
||||
help: Fan Speed
|
||||
- name: igmpSnoopingInf
|
||||
path: /Sysdb/igmpsnooping/vlanStatus/(?P<vlan>.+)/ethGroup/(?P<mac>.+)/intf/(?P<intf>.+)
|
||||
help: IGMP snooping status`)
|
||||
cfg, err := parseConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
coll := newCollector(cfg)
|
||||
|
||||
notif := &pb.Notification{
|
||||
Prefix: makePath("Sysdb"),
|
||||
Update: []*pb.Update{
|
||||
{
|
||||
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:03/intf/Cpu"),
|
||||
Val: &pb.TypedValue{
|
||||
Value: &pb.TypedValue_JsonVal{JsonVal: []byte("true")},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expValues := map[source]float64{
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:01/intf/Cpu",
|
||||
}: 1,
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu",
|
||||
}: 1,
|
||||
{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:03/intf/Cpu",
|
||||
}: 1,
|
||||
}
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
expMetrics := makeMetrics(cfg, expValues, notif, nil)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
// Delete a subtree
|
||||
notif = &pb.Notification{
|
||||
Prefix: makePath("Sysdb"),
|
||||
Delete: []*pb.Path{makePath("igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02")},
|
||||
}
|
||||
src := source{
|
||||
addr: "10.1.1.1",
|
||||
path: "/Sysdb/igmpsnooping/vlanStatus/2050/ethGroup/01:00:5e:01:01:02/intf/Cpu",
|
||||
}
|
||||
delete(expValues, src)
|
||||
|
||||
coll.update("10.1.1.1:6042", makeResponse(notif))
|
||||
expMetrics = makeMetrics(cfg, expValues, notif, expMetrics)
|
||||
if !test.DeepEqual(expMetrics, coll.metrics) {
|
||||
t.Errorf("Mismatched metrics: %v", test.Diff(expMetrics, coll.metrics))
|
||||
}
|
||||
|
||||
}
|
||||
|
25
vendor/github.com/aristanetworks/goarista/cmd/ocredis/main.go
generated
vendored
@ -129,7 +129,10 @@ func bufferToRedis(addr string, notif *pb.Notification) {
|
||||
pub := make(map[string]interface{}, len(notif.Update))
|
||||
for _, update := range notif.Update {
|
||||
key := joinPath(update.Path)
|
||||
value := convertUpdate(update)
|
||||
value, err := gnmi.ExtractValue(update)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to extract valid type from %#v", update)
|
||||
}
|
||||
pub[key] = value
|
||||
marshaledValue, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
@ -186,23 +189,5 @@ func redisPublish(path, kind string, payload interface{}) {
|
||||
}
|
||||
|
||||
func joinPath(path *pb.Path) string {
|
||||
// path.Elem is empty for some reason so using path.Element instead
|
||||
return strings.Join(path.Element, "/")
|
||||
}
|
||||
|
||||
func convertUpdate(update *pb.Update) interface{} {
|
||||
switch update.Value.Type {
|
||||
case pb.Encoding_JSON:
|
||||
var value interface{}
|
||||
err := json.Unmarshal(update.Value.Value, &value)
|
||||
if err != nil {
|
||||
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
|
||||
}
|
||||
return value
|
||||
case pb.Encoding_BYTES:
|
||||
return update.Value.Value
|
||||
default:
|
||||
glog.Fatalf("Unhandled type of value %v in %s", update.Value.Type, update)
|
||||
return nil
|
||||
}
|
||||
return gnmi.StrPath(path)
|
||||
}
|
||||
|
3
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config.go
generated
vendored
@ -60,11 +60,12 @@ func loadConfig(path string) (*Config, error) {
func (c *Config) Match(path string) (metricName string, tags map[string]string) {
    tags = make(map[string]string)

    for _, metric := range c.Metrics {
    for name, metric := range c.Metrics {
        found := metric.re.FindStringSubmatch(path)
        if found == nil {
            continue
        }
        metricName = name
        for i, name := range metric.re.SubexpNames() {
            if i == 0 {
                continue
16
vendor/github.com/aristanetworks/goarista/cmd/octsdb/config_test.go
generated
vendored
@ -26,34 +26,34 @@ func TestConfig(t *testing.T) {
|
||||
tags map[string]string
|
||||
}{{
|
||||
path: "/Sysdb/environment/cooling/status/fan/Fan1/1/speed/value",
|
||||
metric: "eos.environment.fan.speed",
|
||||
metric: "eos.fanspeed.environment.fan.speed",
|
||||
tags: map[string]string{"fan": "Fan1/1"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/powerSupply/PowerSupply2/outputPower/value",
|
||||
metric: "eos.environment.power.output",
|
||||
metric: "eos.powersensor.environment.power.output",
|
||||
tags: map[string]string{"sensor": "PowerSupply2"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/voltageSensor/VoltageSensor23/voltage/value",
|
||||
metric: "eos.environment.voltage",
|
||||
metric: "eos.voltagesensor.environment.voltage",
|
||||
tags: map[string]string{"sensor": "VoltageSensor23"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/power/status/currentSensor/CurrentSensorP2/1/current/value",
|
||||
metric: "eos.environment.current",
|
||||
metric: "eos.currentsensor.environment.current",
|
||||
tags: map[string]string{"sensor": "CurrentSensorP2/1"},
|
||||
}, {
|
||||
path: "/Sysdb/environment/temperature/status/tempSensor/" +
|
||||
"TempSensorP2/1/maxTemperature/value",
|
||||
metric: "eos.environment.maxtemperature",
|
||||
metric: "eos.tempsensor.environment.maxtemperature",
|
||||
tags: map[string]string{"sensor": "TempSensorP2/1"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/lag/intfCounterDir/" +
|
||||
"Port-Channel201/intfCounter/current/statistics/outUcastPkts",
|
||||
metric: "eos.interface.pkt",
|
||||
metric: "eos.intfpktcounter.interface.pkt",
|
||||
tags: map[string]string{"intf": "Port-Channel201", "direction": "out", "type": "Ucast"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
"Ethernet42/intfCounter/current/statistics/inUcastPkts",
|
||||
metric: "eos.interface.pkt",
|
||||
metric: "eos.intfpktcounter.interface.pkt",
|
||||
tags: map[string]string{"intf": "Ethernet42", "direction": "in", "type": "Ucast"},
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
@ -61,7 +61,7 @@ func TestConfig(t *testing.T) {
|
||||
}, {
|
||||
path: "/Sysdb/interface/counter/eth/slice/phy/1/intfCounterDir/" +
|
||||
"Ethernet42/intfCounter/current/ethStatistics/outPfcClassFrames",
|
||||
metric: "eos.interface.pfcclassframes",
|
||||
metric: "eos.intfpfcclasscounter.interface.pfcclassframes",
|
||||
tags: map[string]string{"intf": "Ethernet42", "direction": "out"},
|
||||
}}
|
||||
for i, tcase := range testcases {
|
||||
|
52
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main.go
generated
vendored
@ -6,10 +6,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -135,20 +135,16 @@ func pushToOpenTSDB(addr string, conn OpenTSDBConn, config *Config, notif *pb.No
|
||||
return
|
||||
}
|
||||
}
|
||||
prefix := "/" + strings.Join(notif.Prefix.Element, "/")
|
||||
prefix := gnmi.StrPath(notif.Prefix)
|
||||
for _, update := range notif.Update {
|
||||
if update.Value == nil || update.Value.Type != pb.Encoding_JSON {
|
||||
glog.V(9).Infof("Ignoring incompatible update value in %s", update)
|
||||
continue
|
||||
}
|
||||
value := parseValue(update)
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
path := prefix + "/" + strings.Join(update.Path.Element, "/")
|
||||
path := prefix + gnmi.StrPath(update.Path)
|
||||
metricName, tags := config.Match(path)
|
||||
if metricName == "" {
|
||||
glog.V(8).Infof("Ignoring unmatched update at %s: %+v", path, update.Value)
|
||||
glog.V(8).Infof("Ignoring unmatched update at %s with value %+v", path, value)
|
||||
continue
|
||||
}
|
||||
tags["host"] = host
|
||||
@ -173,28 +169,48 @@ func pushToOpenTSDB(addr string, conn OpenTSDBConn, config *Config, notif *pb.No
|
||||
// the value is a slice of integers/floating point values. If the value is neither of these
|
||||
// or if any element in the slice is non numerical, parseValue returns nil.
|
||||
func parseValue(update *pb.Update) []interface{} {
|
||||
var value interface{}
|
||||
|
||||
decoder := json.NewDecoder(bytes.NewReader(update.Value.Value))
|
||||
decoder.UseNumber()
|
||||
err := decoder.Decode(&value)
|
||||
value, err := gnmi.ExtractValue(update)
|
||||
if err != nil {
|
||||
glog.Fatalf("Malformed JSON update %q in %s", update.Value.Value, update)
|
||||
glog.Fatalf("Malformed JSON update %q in %s", update.Val.GetJsonVal(), update)
|
||||
}
|
||||
|
||||
switch value := value.(type) {
|
||||
case int64:
|
||||
return []interface{}{value}
|
||||
case uint64:
|
||||
return []interface{}{value}
|
||||
case float32:
|
||||
return []interface{}{value}
|
||||
case *pb.Decimal64:
|
||||
val := gnmi.DecimalToFloat(value)
|
||||
if math.IsInf(val, 0) || math.IsNaN(val) {
|
||||
return nil
|
||||
}
|
||||
return []interface{}{val}
|
||||
case json.Number:
|
||||
return []interface{}{parseNumber(value, update)}
|
||||
case []interface{}:
|
||||
for i, val := range value {
|
||||
jsonNum, ok := val.(json.Number)
|
||||
if !ok {
|
||||
switch val := val.(type) {
|
||||
case int64:
|
||||
value[i] = val
|
||||
case uint64:
|
||||
value[i] = val
|
||||
case float32:
|
||||
value[i] = val
|
||||
case *pb.Decimal64:
|
||||
v := gnmi.DecimalToFloat(val)
|
||||
if math.IsInf(v, 0) || math.IsNaN(v) {
|
||||
value[i] = nil
|
||||
}
|
||||
value[i] = v
|
||||
case json.Number:
|
||||
value[i] = parseNumber(val, update)
|
||||
default:
|
||||
// If any value is not a number, skip it.
|
||||
glog.Infof("Element %d: %v is %T, not json.Number", i, val, val)
|
||||
continue
|
||||
}
|
||||
num := parseNumber(jsonNum, update)
|
||||
value[i] = num
|
||||
}
|
||||
return value
|
||||
case map[string]interface{}:
|
||||
|
4
vendor/github.com/aristanetworks/goarista/cmd/octsdb/main_test.go
generated
vendored
@ -35,8 +35,8 @@ func TestParseValue(t *testing.T) { // Because parsing JSON sucks.
    }
    for i, tcase := range testcases {
        actual := parseValue(&pb.Update{
            Value: &pb.Value{
                Value: []byte(tcase.input),
            Val: &pb.TypedValue{
                Value: &pb.TypedValue_JsonVal{JsonVal: []byte(tcase.input)},
            },
        })
        if d := test.Diff(tcase.expected, actual); d != "" {
2
vendor/github.com/aristanetworks/goarista/cmd/octsdb/tsdb.go
generated
vendored
@ -28,7 +28,7 @@ func (d *DataPoint) String() string {
            tags += " " + tag + "=" + value
        }
    }
    return fmt.Sprintf("put %s %d %#v%s\n", d.Metric, d.Timestamp/1e9, d.Value, tags)
    return fmt.Sprintf("put %s %d %v%s\n", d.Metric, d.Timestamp/1e9, d.Value, tags)
}

// OpenTSDBConn is a managed connection to an OpenTSDB instance (or cluster).
243
vendor/github.com/aristanetworks/goarista/cmd/test2influxdb/main.go
generated
vendored
@ -27,6 +27,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
@ -37,7 +38,16 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/glog"
|
||||
client "github.com/influxdata/influxdb/client/v2"
|
||||
client "github.com/influxdata/influxdb1-client/v2"
|
||||
"golang.org/x/tools/benchmark/parse"
|
||||
)
|
||||
|
||||
const (
|
||||
// Benchmark field names
|
||||
fieldNsPerOp = "nsPerOp"
|
||||
fieldAllocedBytesPerOp = "allocedBytesPerOp"
|
||||
fieldAllocsPerOp = "allocsPerOp"
|
||||
fieldMBPerS = "MBPerSec"
|
||||
)
|
||||
|
||||
type tag struct {
|
||||
@ -138,11 +148,30 @@ var (
|
||||
flagAddr = flag.String("addr", "http://localhost:8086", "adddress of influxdb database")
|
||||
flagDB = flag.String("db", "gotest", "use `database` in influxdb")
|
||||
flagMeasurement = flag.String("m", "result", "`measurement` used in influxdb database")
|
||||
flagBenchOnly = flag.Bool("bench", false, "if true, parses and stores benchmark "+
|
||||
"output only while ignoring test results")
|
||||
|
||||
flagTags tags
|
||||
flagFields fields
|
||||
)
|
||||
|
||||
type duplicateTestsErr map[string][]string // package to tests
|
||||
|
||||
func (dte duplicateTestsErr) Error() string {
|
||||
var b bytes.Buffer
|
||||
if _, err := b.WriteString("duplicate tests found:"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for pkg, tests := range dte {
|
||||
if _, err := b.WriteString(
|
||||
fmt.Sprintf("\n\t%s: %s", pkg, strings.Join(tests, " ")),
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Var(&flagTags, "tags", "set additional `tags`. Ex: name=alice,food=pasta")
|
||||
flag.Var(&flagFields, "fields", "set additional `fields`. Ex: id=1234i,long=34.123,lat=72.234")
|
||||
@ -158,18 +187,36 @@ func main() {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
|
||||
if err := run(c, os.Stdin); err != nil {
|
||||
glog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func run(c client.Client, r io.Reader) error {
|
||||
batch, err := client.NewBatchPoints(client.BatchPointsConfig{Database: *flagDB})
|
||||
if err != nil {
|
||||
glog.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := parseTestOutput(os.Stdin, batch); err != nil {
|
||||
glog.Fatal(err)
|
||||
var parseErr error
|
||||
if *flagBenchOnly {
|
||||
parseErr = parseBenchmarkOutput(r, batch)
|
||||
} else {
|
||||
parseErr = parseTestOutput(r, batch)
|
||||
}
|
||||
|
||||
if err := c.Write(batch); err != nil {
|
||||
glog.Fatal(err)
|
||||
// Partial results can still be published with certain parsing errors like
|
||||
// duplicate test names.
|
||||
// The process still exits with a non-zero code in this case.
|
||||
switch parseErr.(type) {
|
||||
case nil, duplicateTestsErr:
|
||||
if err := c.Write(batch); err != nil {
|
||||
return err
|
||||
}
|
||||
glog.Infof("wrote %d data points", len(batch.Points()))
|
||||
}
|
||||
|
||||
return parseErr
|
||||
}
|
||||
|
||||
// See https://golang.org/cmd/test2json/ for a description of 'go test
|
||||
@ -284,3 +331,187 @@ func parseTestOutput(r io.Reader, batch client.BatchPoints) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createBenchmarkTags(pkg string, b *parse.Benchmark) map[string]string {
|
||||
tags := make(map[string]string, len(flagTags)+2)
|
||||
for _, t := range flagTags {
|
||||
tags[t.key] = t.value
|
||||
}
|
||||
tags["package"] = pkg
|
||||
tags["benchmark"] = b.Name
|
||||
|
||||
return tags
|
||||
}
|
||||
|
||||
func createBenchmarkFields(b *parse.Benchmark) map[string]interface{} {
|
||||
fields := make(map[string]interface{}, len(flagFields)+4)
|
||||
for _, f := range flagFields {
|
||||
fields[f.key] = f.value
|
||||
}
|
||||
|
||||
if b.Measured&parse.NsPerOp != 0 {
|
||||
fields[fieldNsPerOp] = b.NsPerOp
|
||||
}
|
||||
if b.Measured&parse.AllocedBytesPerOp != 0 {
|
||||
fields[fieldAllocedBytesPerOp] = float64(b.AllocedBytesPerOp)
|
||||
}
|
||||
if b.Measured&parse.AllocsPerOp != 0 {
|
||||
fields[fieldAllocsPerOp] = float64(b.AllocsPerOp)
|
||||
}
|
||||
if b.Measured&parse.MBPerS != 0 {
|
||||
fields[fieldMBPerS] = b.MBPerS
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
func parseBenchmarkOutput(r io.Reader, batch client.BatchPoints) error {
|
||||
// Unfortunately, test2json is not very reliable when it comes to benchmarks. At least
|
||||
// the following issues exist:
|
||||
//
|
||||
// - It doesn't guarantee a "pass" action for each successful benchmark test
|
||||
// - It might misreport the name of a benchmark (i.e. "Test" field)
|
||||
// See https://github.com/golang/go/issues/27764.
|
||||
// This happens for example when a benchmark panics: it might use the name
|
||||
// of the preceeding benchmark from the same package that run
|
||||
//
|
||||
// The main useful element of the json data is that it separates the output by package,
|
||||
// which complements the features in https://godoc.org/golang.org/x/tools/benchmark/parse
|
||||
|
||||
// Non-benchmark output from libraries like glog can interfere with benchmark result
|
||||
// parsing. filterOutputLine tries to filter out this extraneous info.
|
||||
// It returns a tuple with the output to parse and the name of the benchmark
|
||||
// if it is in the testEvent.
|
||||
filterOutputLine := func(e *testEvent) (string, string) {
|
||||
// The benchmark name is in the output of a separate test event.
|
||||
// It may be suffixed with non-benchmark-related logged output.
|
||||
// So if e.Output is
|
||||
// "BenchmarkFoo \tIrrelevant output"
|
||||
// then here we return
|
||||
// "BenchmarkFoo \t"
|
||||
if strings.HasPrefix(e.Output, "Benchmark") {
|
||||
if split := strings.SplitAfterN(e.Output, "\t", 2); len(split) == 2 {
|
||||
// Filter out output like "Benchmarking foo\t"
|
||||
if words := strings.Fields(split[0]); len(words) == 1 {
|
||||
return split[0], words[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
if strings.Contains(e.Output, "ns/op\t") {
|
||||
return e.Output, ""
|
||||
}
|
||||
if strings.Contains(e.Output, "B/op\t") {
|
||||
return e.Output, ""
|
||||
}
|
||||
if strings.Contains(e.Output, "allocs/op\t") {
|
||||
return e.Output, ""
|
||||
}
|
||||
if strings.Contains(e.Output, "MB/s\t") {
|
||||
return e.Output, ""
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// Extract output per package.
|
||||
type pkgOutput struct {
|
||||
output bytes.Buffer
|
||||
timestamps map[string]time.Time
|
||||
}
|
||||
outputByPkg := make(map[string]*pkgOutput)
|
||||
d := json.NewDecoder(r)
|
||||
for {
|
||||
e := &testEvent{}
|
||||
if err := d.Decode(e); err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if e.Package == "" {
|
||||
return fmt.Errorf("empty package name for event %v", e)
|
||||
}
|
||||
if e.Time.IsZero() {
|
||||
return fmt.Errorf("zero timestamp for event %v", e)
|
||||
}
|
||||
|
||||
line, bname := filterOutputLine(e)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
po, ok := outputByPkg[e.Package]
|
||||
if !ok {
|
||||
po = &pkgOutput{timestamps: make(map[string]time.Time)}
|
||||
outputByPkg[e.Package] = po
|
||||
}
|
||||
po.output.WriteString(line)
|
||||
|
||||
if bname != "" {
|
||||
po.timestamps[bname] = e.Time
|
||||
}
|
||||
}
|
||||
|
||||
// Extract benchmark info from output
|
||||
type pkgBenchmarks struct {
|
||||
benchmarks []*parse.Benchmark
|
||||
timestamps map[string]time.Time
|
||||
}
|
||||
benchmarksPerPkg := make(map[string]*pkgBenchmarks)
|
||||
dups := make(duplicateTestsErr)
|
||||
for pkg, po := range outputByPkg {
|
||||
glog.V(5).Infof("Package %s output:\n%s", pkg, &po.output)
|
||||
|
||||
set, err := parse.ParseSet(&po.output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing package %s: %s", pkg, err)
|
||||
}
|
||||
|
||||
for name, benchmarks := range set {
|
||||
switch len(benchmarks) {
|
||||
case 0:
|
||||
case 1:
|
||||
pb, ok := benchmarksPerPkg[pkg]
|
||||
if !ok {
|
||||
pb = &pkgBenchmarks{timestamps: po.timestamps}
|
||||
benchmarksPerPkg[pkg] = pb
|
||||
}
|
||||
pb.benchmarks = append(pb.benchmarks, benchmarks[0])
|
||||
default:
|
||||
dups[pkg] = append(dups[pkg], name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add a point per benchmark
|
||||
for pkg, pb := range benchmarksPerPkg {
|
||||
for _, bm := range pb.benchmarks {
|
||||
t, ok := pb.timestamps[bm.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("implementation error: no timestamp for benchmark %s "+
|
||||
"in package %s", bm.Name, pkg)
|
||||
}
|
||||
|
||||
tags := createBenchmarkTags(pkg, bm)
|
||||
fields := createBenchmarkFields(bm)
|
||||
point, err := client.NewPoint(
|
||||
*flagMeasurement,
|
||||
tags,
|
||||
fields,
|
||||
t,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
batch.AddPoint(point)
|
||||
glog.V(5).Infof("point: %s", point)
|
||||
}
|
||||
}
|
||||
|
||||
glog.Infof("Parsed %d benchmarks from %d packages",
|
||||
len(batch.Points()), len(benchmarksPerPkg))
|
||||
|
||||
if len(dups) > 0 {
|
||||
return dups
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
142
vendor/github.com/aristanetworks/goarista/cmd/test2influxdb/main_test.go
generated
vendored
@ -10,9 +10,34 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/goarista/test"
|
||||
"github.com/influxdata/influxdb/client/v2"
|
||||
client "github.com/influxdata/influxdb1-client/v2"
|
||||
)
|
||||
|
||||
type mockedConn struct {
|
||||
bp client.BatchPoints
|
||||
}
|
||||
|
||||
func (m *mockedConn) Ping(timeout time.Duration) (time.Duration, string, error) {
|
||||
return time.Duration(0), "", nil
|
||||
}
|
||||
|
||||
func (m *mockedConn) Write(bp client.BatchPoints) error {
|
||||
m.bp = bp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockedConn) Query(q client.Query) (*client.Response, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockedConn) QueryAsChunk(q client.Query) (*client.ChunkedResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockedConn) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newPoint(t *testing.T, measurement string, tags map[string]string,
|
||||
fields map[string]interface{}, timeString string) *client.Point {
|
||||
t.Helper()
|
||||
@ -27,7 +52,7 @@ func newPoint(t *testing.T, measurement string, tags map[string]string,
|
||||
return p
|
||||
}
|
||||
|
||||
func TestParseTestOutput(t *testing.T) {
|
||||
func TestRunWithTestData(t *testing.T) {
|
||||
// Verify tags and fields set by flags are set in records
|
||||
flagTags.Set("tag=foo")
|
||||
flagFields.Set("field=true")
|
||||
@ -40,6 +65,7 @@ func TestParseTestOutput(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
makeTags := func(pkg, resultType string) map[string]string {
|
||||
return map[string]string{"package": pkg, "type": resultType, "tag": "foo"}
|
||||
@ -91,15 +117,12 @@ func TestParseTestOutput(t *testing.T) {
|
||||
),
|
||||
}
|
||||
|
||||
batch, err := client.NewBatchPoints(client.BatchPointsConfig{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := parseTestOutput(f, batch); err != nil {
|
||||
var mc mockedConn
|
||||
if err := run(&mc, f); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if diff := test.Diff(expected, batch.Points()); diff != "" {
|
||||
if diff := test.Diff(expected, mc.bp.Points()); diff != "" {
|
||||
t.Errorf("unexpected diff: %s", diff)
|
||||
}
|
||||
}
|
||||
@ -149,3 +172,106 @@ func TestFieldsFlag(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunWithBenchmarkData(t *testing.T) {
|
||||
// Verify tags and fields set by flags are set in records
|
||||
flagTags.Set("tag=foo")
|
||||
flagFields.Set("field=true")
|
||||
defaultMeasurement := *flagMeasurement
|
||||
*flagMeasurement = "benchmarks"
|
||||
*flagBenchOnly = true
|
||||
defer func() {
|
||||
flagTags = nil
|
||||
flagFields = nil
|
||||
*flagMeasurement = defaultMeasurement
|
||||
*flagBenchOnly = false
|
||||
}()
|
||||
|
||||
f, err := os.Open("testdata/bench-output.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
makeTags := func(pkg, benchmark string) map[string]string {
|
||||
return map[string]string{
|
||||
"package": pkg,
|
||||
"benchmark": benchmark,
|
||||
"tag": "foo",
|
||||
}
|
||||
}
|
||||
makeFields := func(nsPerOp, mbPerS, bPerOp, allocsPerOp float64) map[string]interface{} {
|
||||
m := map[string]interface{}{
|
||||
"field": true,
|
||||
}
|
||||
if nsPerOp > 0 {
|
||||
m[fieldNsPerOp] = nsPerOp
|
||||
}
|
||||
if mbPerS > 0 {
|
||||
m[fieldMBPerS] = mbPerS
|
||||
}
|
||||
if bPerOp > 0 {
|
||||
m[fieldAllocedBytesPerOp] = bPerOp
|
||||
}
|
||||
if allocsPerOp > 0 {
|
||||
m[fieldAllocsPerOp] = allocsPerOp
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
expected := []*client.Point{
|
||||
newPoint(t,
|
||||
"benchmarks",
|
||||
makeTags("arista/pkg", "BenchmarkPassed-8"),
|
||||
makeFields(127, 0, 16, 1),
|
||||
"2018-11-08T15:53:12.935603594-08:00",
|
||||
),
|
||||
newPoint(t,
|
||||
"benchmarks",
|
||||
makeTags("arista/pkg/subpkg1", "BenchmarkLogged-8"),
|
||||
makeFields(120, 0, 16, 1),
|
||||
"2018-11-08T15:53:14.359792815-08:00",
|
||||
),
|
||||
newPoint(t,
|
||||
"benchmarks",
|
||||
makeTags("arista/pkg/subpkg2", "BenchmarkSetBytes-8"),
|
||||
makeFields(120, 8.31, 16, 1),
|
||||
"2018-11-08T15:53:15.717036333-08:00",
|
||||
),
|
||||
newPoint(t,
|
||||
"benchmarks",
|
||||
makeTags("arista/pkg/subpkg3", "BenchmarkWithSubs/sub_1-8"),
|
||||
makeFields(118, 0, 16, 1),
|
||||
"2018-11-08T15:53:17.952644273-08:00",
|
||||
),
|
||||
newPoint(t,
|
||||
"benchmarks",
|
||||
makeTags("arista/pkg/subpkg3", "BenchmarkWithSubs/sub_2-8"),
|
||||
makeFields(117, 0, 16, 1),
|
||||
"2018-11-08T15:53:20.443187742-08:00",
|
||||
),
|
||||
}
|
||||
|
||||
var mc mockedConn
|
||||
err = run(&mc, f)
|
||||
switch err.(type) {
|
||||
case duplicateTestsErr:
|
||||
default:
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// parseBenchmarkOutput arranges the data in maps so the generated points
|
||||
// are in random order. Therefore, we're diffing as map instead of a slice
|
||||
pointsAsMap := func(points []*client.Point) map[string]*client.Point {
|
||||
m := make(map[string]*client.Point, len(points))
|
||||
for _, p := range points {
|
||||
m[p.String()] = p
|
||||
}
|
||||
return m
|
||||
}
|
||||
expectedMap := pointsAsMap(expected)
|
||||
actualMap := pointsAsMap(mc.bp.Points())
|
||||
if diff := test.Diff(expectedMap, actualMap); diff != "" {
|
||||
t.Errorf("unexpected diff: %s\nexpected: %v\nactual: %v", diff, expectedMap, actualMap)
|
||||
}
|
||||
}
|
||||
|
78
vendor/github.com/aristanetworks/goarista/cmd/test2influxdb/testdata/bench-output.txt
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
{"Time":"2018-11-08T15:53:12.935037854-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"--- FAIL: BenchmarkFailed\n"}
|
||||
{"Time":"2018-11-08T15:53:12.935531137-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:12.93555869-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:12.935580755-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"pkg: arista/pkg\n"}
|
||||
{"Time":"2018-11-08T15:53:12.935603594-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"BenchmarkPassed-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:14.335388337-08:00","Action":"output","Package":"arista/pkg","Test":"BenchmarkFailed","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:14.335478711-08:00","Action":"fail","Package":"arista/pkg","Test":"BenchmarkFailed"}
|
||||
{"Time":"2018-11-08T15:53:14.335497629-08:00","Action":"output","Package":"arista/pkg","Output":"FAIL\n"}
|
||||
{"Time":"2018-11-08T15:53:14.337028608-08:00","Action":"output","Package":"arista/pkg","Output":"exit status 1\n"}
|
||||
{"Time":"2018-11-08T15:53:14.337084646-08:00","Action":"output","Package":"arista/pkg","Output":"FAIL\tarista/pkg\t2.044s\n"}
|
||||
{"Time":"2018-11-08T15:53:14.33710102-08:00","Action":"fail","Package":"arista/pkg","Elapsed":2.044}
|
||||
{"Time":"2018-11-08T15:53:14.359620241-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.359313 66704 logged_test.go:12] glog info\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359712288-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359462 66704 logged_test.go:13] glog error\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359732074-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359745657-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359762336-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"pkg: arista/pkg/subpkg1\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359792815-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"BenchmarkLogged-8 \tI1108 15:53:14.359720 66704 logged_test.go:12] glog info\n"}
|
||||
{"Time":"2018-11-08T15:53:14.359815431-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359727 66704 logged_test.go:13] glog error\n"}
|
||||
{"Time":"2018-11-08T15:53:14.360021406-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.359982 66704 logged_test.go:12] glog info\n"}
|
||||
{"Time":"2018-11-08T15:53:14.360051256-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.359999 66704 logged_test.go:13] glog error\n"}
|
||||
{"Time":"2018-11-08T15:53:14.361552861-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.361525 66704 logged_test.go:12] glog info\n"}
|
||||
{"Time":"2018-11-08T15:53:14.361590887-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.361533 66704 logged_test.go:13] glog error\n"}
|
||||
{"Time":"2018-11-08T15:53:14.487176503-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"I1108 15:53:14.487111 66704 logged_test.go:12] glog info\n"}
|
||||
{"Time":"2018-11-08T15:53:14.487247658-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"E1108 15:53:14.487134 66704 logged_test.go:13] glog error\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689641721-08:00","Action":"output","Package":"arista/pkg/subpkg1","Output":"10000000\t 120 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689748801-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"--- BENCH: BenchmarkLogged-8\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689816801-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689863374-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689912578-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689935701-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
|
||||
{"Time":"2018-11-08T15:53:15.689956703-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\tlogged_test.go:11: b.Logging\n"}
|
||||
{"Time":"2018-11-08T15:53:15.6900942-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"BenchmarkPanicked-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:15.692466953-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"panic: panicking\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692555542-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692608705-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"goroutine 10 [running]:\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692641147-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"arista/pkg/subpkg1.BenchmarkPanicked(0xc4200ecf00)\n"}
|
||||
{"Time":"2018-11-08T15:53:15.69266629-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/Users/rprada/go/src/arista/pkg/subpkg1/panicked_test.go:6 +0x39\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692697166-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"testing.(*B).runN(0xc4200ecf00, 0x1)\n"}
|
||||
{"Time":"2018-11-08T15:53:15.69271775-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:141 +0xb2\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692738882-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"testing.(*B).run1.func1(0xc4200ecf00)\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692756307-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:214 +0x5a\n"}
|
||||
{"Time":"2018-11-08T15:53:15.692773483-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"created by testing.(*B).run1\n"}
|
||||
{"Time":"2018-11-08T15:53:15.69279028-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"\t/usr/local/Cellar/go/1.10.2/libexec/src/testing/benchmark.go:207 +0x80\n"}
|
||||
{"Time":"2018-11-08T15:53:15.694213193-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"exit status 2\n"}
|
||||
{"Time":"2018-11-08T15:53:15.694269655-08:00","Action":"output","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Output":"FAIL\tarista/pkg/subpkg1\t1.356s\n"}
|
||||
{"Time":"2018-11-08T15:53:15.694289541-08:00","Action":"fail","Package":"arista/pkg/subpkg1","Test":"BenchmarkLogged-8","Elapsed":1.3559999999999999}
|
||||
{"Time":"2018-11-08T15:53:15.716909476-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:15.71699479-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:15.717015107-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"pkg: arista/pkg/subpkg2\n"}
|
||||
{"Time":"2018-11-08T15:53:15.717036333-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"BenchmarkSetBytes-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:17.044040116-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"10000000\t 120 ns/op\t 8.31 MB/s\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:17.044540811-08:00","Action":"output","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped","Output":"--- SKIP: BenchmarkSkipped\n"}
|
||||
{"Time":"2018-11-08T15:53:17.044584422-08:00","Action":"output","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped","Output":"\tskipped_test.go:6: skipping\n"}
|
||||
{"Time":"2018-11-08T15:53:17.044626843-08:00","Action":"skip","Package":"arista/pkg/subpkg2","Test":"BenchmarkSkipped"}
|
||||
{"Time":"2018-11-08T15:53:17.044642947-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"PASS\n"}
|
||||
{"Time":"2018-11-08T15:53:17.04621203-08:00","Action":"output","Package":"arista/pkg/subpkg2","Output":"ok \tarista/pkg/subpkg2\t1.350s\n"}
|
||||
{"Time":"2018-11-08T15:53:17.046280101-08:00","Action":"pass","Package":"arista/pkg/subpkg2","Elapsed":1.351}
|
||||
{"Time":"2018-11-08T15:53:17.952468407-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:17.952573911-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:17.952607727-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"pkg: arista/pkg/subpkg3\n"}
|
||||
{"Time":"2018-11-08T15:53:17.952644273-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"BenchmarkWithSubs/sub_1-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:20.442803179-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"20000000\t 118 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:20.443187742-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"BenchmarkWithSubs/sub_2-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:21.743033457-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"10000000\t 117 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:21.743118494-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"PASS\n"}
|
||||
{"Time":"2018-11-08T15:53:21.744485534-08:00","Action":"output","Package":"arista/pkg/subpkg3","Output":"ok \tarista/pkg/subpkg3\t4.697s\n"}
|
||||
{"Time":"2018-11-08T15:53:21.744547934-08:00","Action":"pass","Package":"arista/pkg/subpkg3","Elapsed":4.697}
|
||||
{"Time":"2018-11-08T15:53:22.952573911-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:22.952607727-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:22.952644273-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"subpkg4: arista/subpkg4\n"}
|
||||
{"Time":"2018-11-08T15:53:23.442803179-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"BenchmarkDuplicate-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:23.443187742-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:24.952573911-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goos: darwin\n"}
|
||||
{"Time":"2018-11-08T15:53:24.952607727-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"goarch: amd64\n"}
|
||||
{"Time":"2018-11-08T15:53:24.952644273-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"subpkg4: arista/subpkg4\n"}
|
||||
{"Time":"2018-11-08T15:53:25.442803179-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"BenchmarkDuplicate-8 \t"}
|
||||
{"Time":"2018-11-08T15:53:25.443187742-08:00","Action":"output","Package":"arista/subpkg4","Test":"BenchmarkDuplicate","Output":"10000000\t 127 ns/op\t 16 B/op\t 1 allocs/op\n"}
|
||||
{"Time":"2018-11-08T15:53:25.744547934-08:00","Action":"pass","Package":"arista/pkg/subpkg4","Elapsed":4.697}
|
57
vendor/github.com/aristanetworks/goarista/dscp/dial.go
generated
vendored
@ -6,8 +6,8 @@
|
||||
package dscp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
@ -15,15 +15,17 @@ import (
|
||||
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
|
||||
// of service flags to use for incoming connections.
|
||||
func DialTCPWithTOS(laddr, raddr *net.TCPAddr, tos byte) (*net.TCPConn, error) {
|
||||
conn, err := net.DialTCP("tcp", laddr, raddr)
|
||||
d := net.Dialer{
|
||||
LocalAddr: laddr,
|
||||
Control: func(network, address string, c syscall.RawConn) error {
|
||||
return setTOS(network, c, tos)
|
||||
},
|
||||
}
|
||||
conn, err := d.Dial("tcp", raddr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = setTOS(raddr.IP, conn, tos); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
return conn.(*net.TCPConn), err
|
||||
}
|
||||
|
||||
// DialTimeoutWithTOS is similar to net.DialTimeout but with the socket configured
|
||||
@ -31,31 +33,16 @@ func DialTCPWithTOS(laddr, raddr *net.TCPAddr, tos byte) (*net.TCPConn, error) {
|
||||
// of service flags to use for incoming connections.
|
||||
func DialTimeoutWithTOS(network, address string, timeout time.Duration, tos byte) (net.Conn,
|
||||
error) {
|
||||
conn, err := net.DialTimeout(network, address, timeout)
|
||||
d := net.Dialer{
|
||||
Timeout: timeout,
|
||||
Control: func(network, address string, c syscall.RawConn) error {
|
||||
return setTOS(network, c, tos)
|
||||
},
|
||||
}
|
||||
conn, err := d.Dial(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ip net.IP
|
||||
// Unfortunately we have to explicitly switch on the address type here to
|
||||
// avoid calling net.ResolveIpAddr(), as this would resolve the address
|
||||
// again leading to a potentially different result.
|
||||
switch addr := conn.RemoteAddr().(type) {
|
||||
case *net.TCPAddr:
|
||||
ip = addr.IP
|
||||
case *net.UDPAddr:
|
||||
ip = addr.IP
|
||||
case *net.IPAddr:
|
||||
ip = addr.IP
|
||||
case *net.IPNet:
|
||||
ip = addr.IP
|
||||
default:
|
||||
conn.Close()
|
||||
return nil, fmt.Errorf("DialTimeoutWithTOS: cannot set TOS on a %s socket", network)
|
||||
}
|
||||
if err = setTOS(ip, conn, tos); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
@ -63,15 +50,17 @@ func DialTimeoutWithTOS(network, address string, timeout time.Duration, tos byte
|
||||
// providing an option to specify local address (source)
|
||||
func DialTCPTimeoutWithTOS(laddr, raddr *net.TCPAddr, tos byte, timeout time.Duration) (net.Conn,
|
||||
error) {
|
||||
d := net.Dialer{Timeout: timeout, LocalAddr: laddr}
|
||||
d := net.Dialer{
|
||||
Timeout: timeout,
|
||||
LocalAddr: laddr,
|
||||
Control: func(network, address string, c syscall.RawConn) error {
|
||||
return setTOS(network, c, tos)
|
||||
},
|
||||
}
|
||||
conn, err := d.Dial("tcp", raddr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = setTOS(raddr.IP, conn, tos); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
57
vendor/github.com/aristanetworks/goarista/dscp/dscp.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package dscp

import (
    "context"
    "net"
    "os"
    "strings"
    "syscall"

    "github.com/aristanetworks/glog"
    "golang.org/x/sys/unix"
)

// ListenTCPWithTOS is similar to net.ListenTCP but with the socket configured
// to use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func ListenTCPWithTOS(address *net.TCPAddr, tos byte) (*net.TCPListener, error) {
    cfg := net.ListenConfig{
        Control: func(network, address string, c syscall.RawConn) error {
            return setTOS(network, c, tos)
        },
    }

    lsnr, err := cfg.Listen(context.Background(), "tcp", address.String())
    if err != nil {
        return nil, err
    }

    return lsnr.(*net.TCPListener), err
}

func setTOS(network string, c syscall.RawConn, tos byte) error {
    return c.Control(func(fd uintptr) {
        // Configure ipv4 TOS for both IPv4 and IPv6 networks because
        // v4 connections can still come over v6 networks.
        err := unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_TOS, int(tos))
        if err != nil {
            glog.Errorf("failed to configure IP_TOS: %v", os.NewSyscallError("setsockopt", err))
        }
        if strings.HasSuffix(network, "4") {
            // Skip configuring IPv6 when we know we are using an IPv4
            // network to avoid error.
            return
        }
        err6 := unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_TCLASS, int(tos))
        if err6 != nil {
            glog.Errorf(
                "failed to configure IPV6_TCLASS, traffic may not use the configured DSCP: %v",
                os.NewSyscallError("setsockopt", err6))
        }

    })
}
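For context, a minimal usage sketch of the reworked dscp helpers above (not part of the vendored diff; the import path comes from the file location, while the flag value, port, and addresses are invented for illustration). Because the ToS is now applied through the Control hooks, it is set before the socket starts listening or connecting.

package main

import (
    "log"
    "net"
    "time"

    "github.com/aristanetworks/goarista/dscp"
)

func main() {
    // DSCP EF (46) occupies the upper six bits of the ToS byte.
    tos := byte(46 << 2)

    // Listen with the ToS applied via the net.ListenConfig Control hook.
    laddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 6030}
    lsnr, err := dscp.ListenTCPWithTOS(laddr, tos)
    if err != nil {
        log.Fatal(err)
    }
    defer lsnr.Close()

    // Dial with the same ToS; DialTimeoutWithTOS covers the string-address form.
    conn, err := dscp.DialTimeoutWithTOS("tcp", "127.0.0.1:6030", 5*time.Second, tos)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}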
25
vendor/github.com/aristanetworks/goarista/dscp/listen.go
generated
vendored
@ -1,25 +0,0 @@
// Copyright (c) 2016 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// Package dscp provides helper functions to apply DSCP / ECN / CoS flags to sockets.
package dscp

import (
    "net"
)

// ListenTCPWithTOS is similar to net.ListenTCP but with the socket configured
// to the use the given ToS (Type of Service), to specify DSCP / ECN / class
// of service flags to use for incoming connections.
func ListenTCPWithTOS(address *net.TCPAddr, tos byte) (*net.TCPListener, error) {
    lsnr, err := net.ListenTCP("tcp", address)
    if err != nil {
        return nil, err
    }
    if err = setTOS(address.IP, lsnr, tos); err != nil {
        lsnr.Close()
        return nil, err
    }
    return lsnr, err
}
66
vendor/github.com/aristanetworks/goarista/dscp/tos_unix.go
generated
vendored
@ -1,66 +0,0 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package dscp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"reflect"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// This works for the UNIX implementation of netFD, i.e. not on Windows and Plan9.
|
||||
// conn must either implement syscall.Conn or be a TCPListener.
|
||||
func setTOS(ip net.IP, conn interface{}, tos byte) error {
|
||||
var proto, optname int
|
||||
if ip.To4() != nil {
|
||||
proto = unix.IPPROTO_IP
|
||||
optname = unix.IP_TOS
|
||||
} else {
|
||||
proto = unix.IPPROTO_IPV6
|
||||
optname = unix.IPV6_TCLASS
|
||||
}
|
||||
|
||||
switch c := conn.(type) {
|
||||
case syscall.Conn:
|
||||
return setTOSWithSyscallConn(proto, optname, c, tos)
|
||||
case *net.TCPListener:
|
||||
// This code is needed to support go1.9. In go1.10
|
||||
// *net.TCPListener implements syscall.Conn.
|
||||
return setTOSWithTCPListener(proto, optname, c, tos)
|
||||
}
|
||||
return fmt.Errorf("unsupported connection type: %T", conn)
|
||||
}
|
||||
|
||||
func setTOSWithTCPListener(proto, optname int, conn *net.TCPListener, tos byte) error {
|
||||
// A kludge for pre-go1.10 to get the fd of a net.TCPListener
|
||||
value := reflect.ValueOf(conn)
|
||||
netFD := value.Elem().FieldByName("fd").Elem()
|
||||
fd := int(netFD.FieldByName("pfd").FieldByName("Sysfd").Int())
|
||||
if err := unix.SetsockoptInt(fd, proto, optname, int(tos)); err != nil {
|
||||
return os.NewSyscallError("setsockopt", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setTOSWithSyscallConn(proto, optname int, conn syscall.Conn, tos byte) error {
|
||||
syscallConn, err := conn.SyscallConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var setsockoptErr error
|
||||
err = syscallConn.Control(func(fd uintptr) {
|
||||
if err := unix.SetsockoptInt(int(fd), proto, optname, int(tos)); err != nil {
|
||||
setsockoptErr = os.NewSyscallError("setsockopt", err)
|
||||
}
|
||||
})
|
||||
if setsockoptErr != nil {
|
||||
return setsockoptErr
|
||||
}
|
||||
return err
|
||||
}
|
62
vendor/github.com/aristanetworks/goarista/elasticsearch/json.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright (c) 2017 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
// NotificationToMaps converts a gNMI Notification into a []map[string]interface{} that adheres
|
||||
// to the Data schema defined in schema.go
|
||||
func NotificationToMaps(datasetID string,
|
||||
notification *pb.Notification) ([]map[string]interface{}, error) {
|
||||
var requests []map[string]interface{}
|
||||
var trueVar = true
|
||||
|
||||
ts := time.Unix(0, notification.Timestamp)
|
||||
timeStampNano := strconv.FormatInt(ts.UnixNano(), 10)
|
||||
|
||||
for _, delete := range notification.Delete {
|
||||
path := gnmi.JoinPaths(notification.Prefix, delete)
|
||||
doc := map[string]interface{}{
|
||||
"Timestamp": timeStampNano,
|
||||
"DatasetID": datasetID,
|
||||
"Path": gnmi.StrPath(path),
|
||||
"Del": &trueVar,
|
||||
}
|
||||
|
||||
keyStr := gnmi.StrPath(delete)
|
||||
doc["Key"] = []byte(keyStr) // use strigified delete.Path for key
|
||||
if err := SetKey(doc, keyStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requests = append(requests, doc)
|
||||
}
|
||||
for _, update := range notification.Update {
|
||||
key := update.Path
|
||||
path := gnmi.JoinPaths(notification.Prefix, key)
|
||||
doc := map[string]interface{}{
|
||||
"Timestamp": timeStampNano,
|
||||
"DatasetID": datasetID,
|
||||
"Path": gnmi.StrPath(path),
|
||||
}
|
||||
keyStr := gnmi.StrPath(key)
|
||||
doc["Key"] = []byte(keyStr) // use strigified update.Path for key
|
||||
if err := SetKey(doc, keyStr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := SetValue(doc, update.Val.Value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
requests = append(requests, doc)
|
||||
}
|
||||
|
||||
return requests, nil
|
||||
}
|
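A rough sketch of driving NotificationToMaps from a caller (illustrative only; it mirrors the test case that follows and assumes the vendored goarista import paths):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/aristanetworks/goarista/elasticsearch"
    "github.com/aristanetworks/goarista/gnmi"
    pb "github.com/openconfig/gnmi/proto/gnmi"
)

func mustPath(s string) *pb.Path {
    p, err := gnmi.ParseGNMIElements(gnmi.SplitPath(s))
    if err != nil {
        log.Fatal(err)
    }
    return p
}

func main() {
    n := &pb.Notification{
        Timestamp: 123,
        Prefix:    mustPath("foo"),
        Update: []*pb.Update{{
            Path: mustPath("Int"),
            Val:  &pb.TypedValue{Value: &pb.TypedValue_IntVal{IntVal: -123}},
        }},
    }
    // Each update or delete becomes one document map following the schema in schema.go.
    docs, err := elasticsearch.NotificationToMaps("0", n)
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.MarshalIndent(docs, "", "  ")
    fmt.Println(string(out))
}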
173
vendor/github.com/aristanetworks/goarista/elasticsearch/json_test.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
|
||||
// Arista Networks, Inc. Confidential and Proprietary.
|
||||
// Subject to Arista Networks, Inc.'s EULA.
|
||||
// FOR INTERNAL USE ONLY. NOT FOR DISTRIBUTION.
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/gnmi"
|
||||
pb "github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
func stringToGNMIPath(path string) *pb.Path {
|
||||
p, _ := gnmi.ParseGNMIElements(gnmi.SplitPath(path))
|
||||
return p
|
||||
}
|
||||
|
||||
func gnmiUpdate(path string, value *pb.TypedValue) *pb.Update {
|
||||
return &pb.Update{
|
||||
Path: stringToGNMIPath(path),
|
||||
Val: value,
|
||||
}
|
||||
}
|
||||
|
||||
func toPtr(val interface{}) interface{} {
|
||||
switch tv := val.(type) {
|
||||
case string:
|
||||
return &tv
|
||||
case int:
|
||||
i64 := int64(tv)
|
||||
return &i64
|
||||
case bool:
|
||||
return &tv
|
||||
case float64:
|
||||
return &tv
|
||||
default:
|
||||
return &tv
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataConversion(t *testing.T) {
|
||||
cases := []struct {
|
||||
in *pb.Notification
|
||||
data []Data
|
||||
}{
|
||||
{
|
||||
in: &pb.Notification{
|
||||
Timestamp: 123,
|
||||
Prefix: stringToGNMIPath("foo"),
|
||||
Update: []*pb.Update{
|
||||
gnmiUpdate("String", &pb.TypedValue{Value: &pb.TypedValue_StringVal{
|
||||
StringVal: "hello"}}),
|
||||
gnmiUpdate("Int", &pb.TypedValue{Value: &pb.TypedValue_IntVal{
|
||||
IntVal: -123}}),
|
||||
gnmiUpdate("Bool", &pb.TypedValue{Value: &pb.TypedValue_BoolVal{
|
||||
BoolVal: true}}),
|
||||
}},
|
||||
data: []Data{
|
||||
Data{
|
||||
Timestamp: "123",
|
||||
DatasetID: "0",
|
||||
Path: "/foo/String",
|
||||
Key: []byte("/String"),
|
||||
KeyString: toPtr("/String").(*string),
|
||||
ValueString: toPtr("hello").(*string)},
|
||||
Data{
|
||||
Timestamp: "123",
|
||||
DatasetID: "0",
|
||||
Path: "/foo/Int",
|
||||
Key: []byte("/Int"),
|
||||
KeyString: toPtr("/Int").(*string),
|
||||
ValueLong: toPtr(-123).(*int64)},
|
||||
Data{
|
||||
Timestamp: "123",
|
||||
DatasetID: "0",
|
||||
Path: "/foo/Bool",
|
||||
Key: []byte("/Bool"),
|
||||
KeyString: toPtr("/Bool").(*string),
|
||||
ValueBool: toPtr(true).(*bool)},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: &pb.Notification{
|
||||
Timestamp: 234,
|
||||
Prefix: stringToGNMIPath("bar"),
|
||||
Update: []*pb.Update{
|
||||
gnmiUpdate("Decimal", &pb.TypedValue{Value: &pb.TypedValue_DecimalVal{
|
||||
DecimalVal: &pb.Decimal64{Digits: -123, Precision: 2}}}),
|
||||
}},
|
||||
data: []Data{
|
||||
Data{
|
||||
Timestamp: "234",
|
||||
DatasetID: "0",
|
||||
Path: "/bar/Decimal",
|
||||
Key: []byte("/Decimal"),
|
||||
KeyString: toPtr("/Decimal").(*string),
|
||||
ValueDouble: toPtr(-1.23).(*float64)},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: &pb.Notification{
|
||||
Timestamp: 345,
|
||||
Prefix: stringToGNMIPath("baz"),
|
||||
Update: []*pb.Update{
|
||||
gnmiUpdate("Leaflist", &pb.TypedValue{Value: &pb.TypedValue_LeaflistVal{
|
||||
LeaflistVal: &pb.ScalarArray{Element: []*pb.TypedValue{
|
||||
&pb.TypedValue{Value: &pb.TypedValue_StringVal{StringVal: "hello"}},
|
||||
&pb.TypedValue{Value: &pb.TypedValue_IntVal{IntVal: -123}},
|
||||
&pb.TypedValue{Value: &pb.TypedValue_BoolVal{BoolVal: true}},
|
||||
}}}}),
|
||||
}},
|
||||
data: []Data{
|
||||
Data{
|
||||
Timestamp: "345",
|
||||
DatasetID: "0",
|
||||
Path: "/baz/Leaflist",
|
||||
Key: []byte("/Leaflist"),
|
||||
KeyString: toPtr("/Leaflist").(*string),
|
||||
Value: []*field{
|
||||
&field{String: toPtr("hello").(*string)},
|
||||
&field{Long: toPtr(-123).(*int64)},
|
||||
&field{Bool: toPtr(true).(*bool)}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
// JsonVal -> ValueString
|
||||
in: &pb.Notification{
|
||||
Timestamp: 456,
|
||||
Prefix: stringToGNMIPath("foo"),
|
||||
Update: []*pb.Update{gnmiUpdate("bar",
|
||||
&pb.TypedValue{Value: &pb.TypedValue_JsonVal{
|
||||
//JsonVal: []byte(`[ {"json": "val"}]`)}})}},
|
||||
JsonVal: []byte("67")}})}},
|
||||
data: []Data{
|
||||
Data{
|
||||
Timestamp: "456",
|
||||
DatasetID: "0",
|
||||
Path: "/foo/bar",
|
||||
Key: []byte("/bar"),
|
||||
KeyString: toPtr("/bar").(*string),
|
||||
ValueDouble: toPtr(float64(67)).(*float64)},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
maps, err := NotificationToMaps("0", tc.in)
|
||||
if err != nil {
|
||||
t.Fatalf("issue converting %v to data map. Err: %v", tc.in, err)
|
||||
}
|
||||
if len(maps) != len(tc.data) {
|
||||
t.Fatalf("number of output notifications (%d) does not match expected %d",
|
||||
len(maps), len(tc.data))
|
||||
}
|
||||
byteArr, err := json.Marshal(maps)
|
||||
if err != nil {
|
||||
t.Fatalf("error while trying to marshal map: %v", err)
|
||||
}
|
||||
|
||||
data := []Data{}
|
||||
json.Unmarshal(byteArr, &data)
|
||||
|
||||
if !reflect.DeepEqual(data, tc.data) {
|
||||
gotPretty, _ := json.MarshalIndent(data, "", " ")
|
||||
wantPretty, _ := json.MarshalIndent(tc.data, "", " ")
|
||||
t.Fatalf("reflect struct array mismatch!\n Got: %+v\n Want: %+v",
|
||||
string(gotPretty), string(wantPretty))
|
||||
}
|
||||
}
|
||||
}
|
135
vendor/github.com/aristanetworks/goarista/elasticsearch/mappings.go
generated
vendored
@ -5,10 +5,145 @@
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/openconfig/gnmi/proto/gnmi"
|
||||
)
|
||||
|
||||
// EscapeFieldName escapes field names for Elasticsearch
|
||||
func EscapeFieldName(name string) string {
|
||||
return strings.Replace(name, ".", "_", -1)
|
||||
}
|
||||
|
||||
// SetKey fills a Data map's relevant key if the key is a simple type.
|
||||
func SetKey(m map[string]interface{}, key interface{}) error {
|
||||
// In the case of gnmi, these will always be strings
|
||||
if str, ok := key.(string); ok {
|
||||
m["KeyString"] = &str
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown type %v", key)
|
||||
}
|
||||
|
||||
// SetValue fills a Data map's relevant Value fields
|
||||
func SetValue(m map[string]interface{}, val interface{}) error {
|
||||
if str := toStringPtr(val); str != nil {
|
||||
m["ValueString"] = str
|
||||
} else if long := toLongPtr(val); long != nil {
|
||||
m["ValueLong"] = long
|
||||
} else if bl := toBoolPtr(val); bl != nil {
|
||||
m["ValueBool"] = bl
|
||||
} else if dub := toDoublePtr(val); dub != nil {
|
||||
m["ValueDouble"] = dub
|
||||
} else if arr := toValueArray(val); arr != nil {
|
||||
m["Value"] = arr
|
||||
} else if json := toJSONValue(val); json != nil {
|
||||
switch tv := json.(type) {
|
||||
case string:
|
||||
m["ValueString"] = &tv
|
||||
case int, uint:
|
||||
m["ValueLong"] = &tv
|
||||
case bool:
|
||||
m["ValueBool"] = &tv
|
||||
case float32:
|
||||
m["ValueDouble"] = &tv
|
||||
case float64:
|
||||
m["ValueDouble"] = &tv
|
||||
}
|
||||
} else {
|
||||
// this type may not be supported yet, or could not convert
|
||||
return fmt.Errorf("unknown type %v", val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// *TypedValue_StringVal
|
||||
func toStringPtr(val interface{}) *string {
|
||||
if tv, ok := val.(*gnmi.TypedValue_StringVal); ok {
|
||||
return &tv.StringVal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// *TypedValue_IntVal, *TypedValue_UintVal
|
||||
func toLongPtr(val interface{}) *int64 {
|
||||
switch tv := val.(type) {
|
||||
case *gnmi.TypedValue_IntVal:
|
||||
val := int64(tv.IntVal)
|
||||
return &val
|
||||
case *gnmi.TypedValue_UintVal:
|
||||
val := int64(tv.UintVal)
|
||||
return &val
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// *TypedValue_BoolVal
|
||||
func toBoolPtr(val interface{}) *bool {
|
||||
if tv, ok := val.(*gnmi.TypedValue_BoolVal); ok {
|
||||
return &tv.BoolVal
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// *TypedValue_FloatVal, *TypedValue_DecimalVal
|
||||
func toDoublePtr(val interface{}) *float64 {
|
||||
switch tv := val.(type) {
|
||||
case *gnmi.TypedValue_FloatVal:
|
||||
val := float64(tv.FloatVal)
|
||||
if !math.IsInf(val, 0) && !math.IsNaN(val) {
|
||||
return &val
|
||||
}
|
||||
case *gnmi.TypedValue_DecimalVal:
|
||||
// convert to float64 for now
|
||||
val := float64(tv.DecimalVal.Digits)
|
||||
for i := 0; i < int(tv.DecimalVal.Precision); i++ {
|
||||
val /= 10
|
||||
}
|
||||
if !math.IsInf(val, 0) && !math.IsNaN(val) {
|
||||
return &val
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flatten a non-simple type into a []*field
|
||||
func toValueArray(val interface{}) []*map[string]interface{} {
|
||||
if tv, ok := val.(*gnmi.TypedValue_LeaflistVal); ok {
|
||||
var fields []*map[string]interface{}
|
||||
// LeaflistVal should only have simple types
|
||||
for _, el := range tv.LeaflistVal.Element {
|
||||
m := make(map[string]interface{})
|
||||
if str := toStringPtr(el.Value); str != nil {
|
||||
m["String"] = str
|
||||
} else if long := toLongPtr(el.Value); long != nil {
|
||||
m["Long"] = long
|
||||
} else if bl := toBoolPtr(el.Value); bl != nil {
|
||||
m["Bool"] = bl
|
||||
} else if dub := toDoublePtr(el.Value); dub != nil {
|
||||
m["Double"] = dub
|
||||
} else {
|
||||
// this type is not supported yet
|
||||
return nil
|
||||
}
|
||||
fields = append(fields, &m)
|
||||
}
|
||||
return fields
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmarshal a JSON value
|
||||
func toJSONValue(val interface{}) interface{} {
|
||||
if tv, ok := val.(*gnmi.TypedValue_JsonVal); ok {
|
||||
var out interface{}
|
||||
if err := json.Unmarshal(tv.JsonVal, &out); err != nil {
|
||||
return nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
295
vendor/github.com/aristanetworks/goarista/elasticsearch/schema.go
generated
vendored
Normal file
@ -0,0 +1,295 @@
|
||||
// Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
|
||||
// Arista Networks, Inc. Confidential and Proprietary.
|
||||
// Subject to Arista Networks, Inc.'s EULA.
|
||||
// FOR INTERNAL USE ONLY. NOT FOR DISTRIBUTION.
|
||||
|
||||
package elasticsearch
|
||||
|
||||
type field struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
String *string `json:"string,omitempty"`
|
||||
Double *float64 `json:"double,omitempty"`
|
||||
Long *int64 `json:"long,omitempty"`
|
||||
Bool *bool `json:"bool,omitempty"`
|
||||
Ptr *string `json:"ptr,omitempty"`
|
||||
// If the string looks like an ip address
|
||||
// we also index it here as ip address
|
||||
IP string `json:"ip,omitempty"`
|
||||
// If the string looks like a mac address
|
||||
// we also index it here as mac address
|
||||
MAC string `json:"mac,omitempty"`
|
||||
}
|
||||
|
||||
// Data represents the document format for a notification
|
||||
type Data struct {
|
||||
// The timestamp in nanosecond resolution
|
||||
Timestamp string
|
||||
// Organization ID
|
||||
OrgID string
|
||||
DatasetType string
|
||||
// The datasetID
|
||||
DatasetID string
|
||||
// The stringified path
|
||||
Path string
|
||||
// The codec encoded key
|
||||
Key []byte
|
||||
// The key data
|
||||
// this array will have each entry as an object with "name" field
|
||||
// and "<type>" field for value.
|
||||
// If name is not set, the data is put in one of the simple type fields
|
||||
// The problem with nested types is that each entry in the array is creating a doc
|
||||
// and the number of docs is exploding which is not good.
|
||||
// So one optimization is to flatten for simple values and not use the nested field.
|
||||
KeyData []*field `json:",omitempty"`
|
||||
KeyString *string `json:",omitempty"`
|
||||
KeyDouble *float64 `json:",omitempty"`
|
||||
KeyLong *int64 `json:",omitempty"`
|
||||
KeyBool *bool `json:",omitempty"`
|
||||
KeyPtr *string `json:",omitempty"`
|
||||
// If the simple string looks like an ip address
|
||||
// we also index it here as ip address
|
||||
KeyIP string `json:",omitempty"`
|
||||
// If the simple string looks like a mac address
|
||||
// we also index it here as mac address
|
||||
KeyMAC string `json:",omitempty"`
|
||||
// The value data
|
||||
// this array will have each entry as an object with "name" field
|
||||
// and "<type>" field for value.
|
||||
// If name is not set, the data was a simple value
|
||||
// The problem with nested types is that each entry in the array is creating a doc
|
||||
// and the number of docs is exploding which is not good.
|
||||
// So one optimization is to flatten for simple values and not use the nested field.
|
||||
Value []*field `json:",omitempty"`
|
||||
ValueString *string `json:",omitempty"`
|
||||
ValueDouble *float64 `json:",omitempty"`
|
||||
ValueLong *int64 `json:",omitempty"`
|
||||
ValueBool *bool `json:",omitempty"`
|
||||
ValuePtr *string `json:",omitempty"`
|
||||
// If the simple string looks like an ip address
|
||||
// we also index it here as ip address
|
||||
ValueIP string `json:",omitempty"`
|
||||
// If the simple string looks like a mac address
|
||||
// we also index it here as mac address
|
||||
ValueMAC string `json:",omitempty"`
|
||||
|
||||
// Present when it's a delete
|
||||
// In this case, value will not be present
|
||||
Del *bool `json:",omitempty"`
|
||||
// Present when it's a deleteAll
|
||||
// In this case, key and value will not be present
|
||||
DelAll *bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
var index = map[string]interface{}{
|
||||
"settings": map[string]interface{}{
|
||||
"index": map[string]interface{}{
|
||||
"codec": "best_compression",
|
||||
"number_of_shards": 5,
|
||||
"number_of_replicas": 2,
|
||||
},
|
||||
"analysis": map[string]interface{}{
|
||||
"analyzer": map[string]interface{}{
|
||||
"mac_analyzer": map[string]interface{}{
|
||||
"tokenizer": "mac_tokenizer",
|
||||
"filter": []string{
|
||||
"lowercase",
|
||||
},
|
||||
},
|
||||
"path_analyzer": map[string]interface{}{
|
||||
"tokenizer": "path_tokenizer",
|
||||
},
|
||||
},
|
||||
"tokenizer": map[string]interface{}{
|
||||
"mac_tokenizer": map[string]interface{}{
|
||||
"type": "edgeNGram",
|
||||
"min_gram": "2",
|
||||
"max_gram": "17",
|
||||
},
|
||||
"path_tokenizer": map[string]interface{}{
|
||||
"type": "path_hierarchy",
|
||||
"delimiter": "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// ID of the doc is:
|
||||
// {orgid}-{dataset_id}-{md5 "{tsnano}-{codec_path}-{codec_key}"}
|
||||
// Note: For DeleteAll the "-codec_key" is omitted
|
||||
// id in elasticsearch can be 512 bytes max, so we use sha1 to hash.
|
||||
// We theoretically can have a collision. It is unlikely to happen.
|
||||
// In case there is a collision, too bad, we'll have corrupted data.
|
||||
// We have the datasetid in the id, so in the unlikely case we have a collision,
|
||||
// this collision cannot happen across organizations/devices.
|
||||
"mappings": map[string]interface{}{
|
||||
"_doc": map[string]interface{}{
|
||||
"properties": map[string]interface{}{
|
||||
// Timestamp in nanoseconds
|
||||
"Timestamp": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
// Organization id
|
||||
"OrgID": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
// Dataset type
|
||||
"DatasetType": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
// Dataset id
|
||||
"DatasetID": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
// base64 encoded of codec encoded representation of the path
|
||||
// "path": {
|
||||
// "type": "binary"
|
||||
// },
|
||||
// The stringified version of the path
|
||||
"Path": map[string]interface{}{
|
||||
"type": "keyword",
|
||||
},
|
||||
// base64 encoded of codec encoded representation of the key
|
||||
"Key": map[string]interface{}{
|
||||
"type": "binary",
|
||||
"doc_values": true,
|
||||
},
|
||||
// this array will have each entry as an object with "name" field
|
||||
// and "<type>" field for value.
|
||||
// If name is not set, the data was a simple value
|
||||
"KeyData": map[string]interface{}{
|
||||
"type": "nested",
|
||||
"properties": map[string]interface{}{
|
||||
"name": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"long": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
"string": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"double": map[string]interface{}{
|
||||
"type": "double",
|
||||
},
|
||||
"bool": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
"ptr": map[string]interface{}{
|
||||
"type": "keyword",
|
||||
},
|
||||
"ip": map[string]interface{}{
|
||||
"type": "ip",
|
||||
},
|
||||
"mac": map[string]interface{}{
|
||||
"type": "text",
|
||||
"analyzer": "mac_analyzer",
|
||||
"search_analyzer": "keyword",
|
||||
},
|
||||
},
|
||||
},
|
||||
"KeyLong": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
"KeyString": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"KeyDouble": map[string]interface{}{
|
||||
"type": "double",
|
||||
},
|
||||
"KeyBool": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
"KeyPtr": map[string]interface{}{
|
||||
"type": "keyword",
|
||||
},
|
||||
"KeyIP": map[string]interface{}{
|
||||
"type": "ip",
|
||||
},
|
||||
"KeyMAC": map[string]interface{}{
|
||||
"type": "text",
|
||||
"analyzer": "mac_analyzer",
|
||||
"search_analyzer": "keyword",
|
||||
},
|
||||
// this array will have each entry as an object with "name" field
|
||||
// and "<type>" field for value.
|
||||
// If name is not set, the data was a simple value
|
||||
"Value": map[string]interface{}{
|
||||
"type": "nested",
|
||||
"properties": map[string]interface{}{
|
||||
"name": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"long": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
"string": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"double": map[string]interface{}{
|
||||
"type": "double",
|
||||
},
|
||||
"bool": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
"ptr": map[string]interface{}{
|
||||
"type": "keyword",
|
||||
},
|
||||
"ip": map[string]interface{}{
|
||||
"type": "ip",
|
||||
},
|
||||
"mac": map[string]interface{}{
|
||||
"type": "text",
|
||||
"analyzer": "mac_analyzer",
|
||||
"search_analyzer": "keyword",
|
||||
},
|
||||
},
|
||||
},
|
||||
"ValueLong": map[string]interface{}{
|
||||
"type": "long",
|
||||
},
|
||||
"ValueString": map[string]interface{}{
|
||||
"type": "text",
|
||||
},
|
||||
"ValueDouble": map[string]interface{}{
|
||||
"type": "double",
|
||||
},
|
||||
"ValueBool": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
"ValuePtr": map[string]interface{}{
|
||||
"type": "keyword",
|
||||
},
|
||||
"ValueIP": map[string]interface{}{
|
||||
"type": "ip",
|
||||
},
|
||||
"ValueMAC": map[string]interface{}{
|
||||
"type": "text",
|
||||
"analyzer": "mac_analyzer",
|
||||
"search_analyzer": "keyword",
|
||||
},
|
||||
// Present when it's a delete
|
||||
// In this case, value will not be present
|
||||
"Del": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
// Present when it's a deleteAll
|
||||
// In this case, key and value will not be present
|
||||
"DelAll": map[string]interface{}{
|
||||
"type": "boolean",
|
||||
},
|
||||
"query": map[string]interface{}{
|
||||
"type": "percolator",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// excludedFields are fields that are not affected by init options
|
||||
// this is mainly to make excluded numeric types queryable
|
||||
var excludedFields = map[string]interface{}{
|
||||
"Timestamp": struct{}{},
|
||||
"OrgID": struct{}{},
|
||||
"DatasetType": struct{}{},
|
||||
"DatasetID": struct{}{},
|
||||
}
|
43
vendor/github.com/aristanetworks/goarista/flag/helpers.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright (c) 2019 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package flag
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FormatOptions writes a mapping of options to usage information
|
||||
// that looks like standard Go help information. The header should
|
||||
// end with a colon if options are provided.
|
||||
func FormatOptions(w io.Writer, header string, usageMap map[string]string) {
|
||||
ops := []string{}
|
||||
for k := range usageMap {
|
||||
ops = append(ops, k)
|
||||
}
|
||||
sort.Strings(ops)
|
||||
fmt.Fprintf(w, "%v\n", header)
|
||||
for _, o := range ops {
|
||||
fmt.Fprintf(w, " %v\n\t%v\n", o, usageMap[o])
|
||||
}
|
||||
}
|
||||
|
||||
// AddHelp adds indented documentation to flag.Usage.
|
||||
func AddHelp(seperator, help string) {
|
||||
result := []string{}
|
||||
s := strings.Split(help, "\n")
|
||||
for _, line := range s {
|
||||
result = append(result, " "+line)
|
||||
}
|
||||
old := flag.Usage
|
||||
flag.Usage = func() {
|
||||
old()
|
||||
fmt.Println(seperator)
|
||||
fmt.Print(strings.TrimRight(strings.Join(result, "\n"), " "))
|
||||
}
|
||||
}
|
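A short sketch of how FormatOptions and AddHelp compose (illustrative; the -backend flag and its values are invented for the example): FormatOptions renders a sorted usage map in the standard Go help layout, and AddHelp appends that text after the normal flag.Usage output.

package main

import (
    "bytes"
    "flag"

    aflag "github.com/aristanetworks/goarista/flag"
)

func main() {
    var usage bytes.Buffer
    aflag.FormatOptions(&usage, "Supported -backend values:", map[string]string{
        "influxdb": "write points to InfluxDB",
        "stdout":   "print points to standard output",
    })
    // Shown after the regular flag help, separated by the given separator line.
    aflag.AddHelp("", usage.String())

    flag.String("backend", "stdout", "where to send results")
    flag.Parse()
}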
56
vendor/github.com/aristanetworks/goarista/flag/map.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright (c) 2019 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package flag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Map is a type used to provide mapped options via command line flags.
|
||||
// It implements the flag.Value interface.
|
||||
// If a flag is passed without a value, for example: `-option somebool` it will still be
|
||||
// initialized, so you can use `_, ok := option["somebool"]` to check if it exists.
|
||||
type Map map[string]string
|
||||
|
||||
// String is the method to format the flag's value, part of the flag.Value interface.
|
||||
// The String method's output is used in diagnostics.
|
||||
func (o Map) String() string {
|
||||
return fmt.Sprintf("%#v", o)
|
||||
}
|
||||
|
||||
// Set is the method to set the flag value, part of the flag.Value interface.
|
||||
// Set's argument is a string to be parsed to set the flag.
|
||||
// It still initializes flags that don't explicitly set a string
|
||||
func (o Map) Set(value string) error {
|
||||
var k, v string
|
||||
idx := strings.Index(value, "=")
|
||||
if idx == -1 {
|
||||
k = value
|
||||
} else {
|
||||
k = value[:idx]
|
||||
v = value[idx+1:]
|
||||
}
|
||||
if _, exists := o[k]; exists {
|
||||
return fmt.Errorf("%v is a duplicate option", k)
|
||||
}
|
||||
|
||||
o[k] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the golang type string. This method is required by pflag library.
|
||||
func (o Map) Type() string {
|
||||
return "Map"
|
||||
}
|
||||
|
||||
// Clone returns a copy of flag options
|
||||
func (o Map) Clone() Map {
|
||||
options := make(Map, len(o))
|
||||
for k, v := range o {
|
||||
options[k] = v
|
||||
}
|
||||
return options
|
||||
}
|
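A rough usage sketch for the Map type above, registered with the standard flag package; the -option flag name and the program wiring are made up for illustration:

package main

import (
	"flag"
	"fmt"

	aflag "github.com/aristanetworks/goarista/flag"
)

func main() {
	options := aflag.Map{}
	// Map implements flag.Value, so it can be registered directly.
	flag.Var(options, "option", "repeatable key=value option; a bare key is recorded with an empty value")
	flag.Parse()

	// e.g. `prog -option timeout=30s -option debug` yields
	// map[string]string{"timeout": "30s", "debug": ""}.
	if _, ok := options["debug"]; ok {
		fmt.Println("debug requested")
	}
}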
23 vendor/github.com/aristanetworks/goarista/flag/no_args.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package flag

import (
	"flag"
	"fmt"
	"os"
)

// CheckNoArgs checks if any positional arguments were provided and if so, exit with error.
func CheckNoArgs() error {
	if !flag.Parsed() {
		panic("CheckNoArgs must be called after flags have been parsed")
	}

	if flag.NArg() == 0 {
		return nil
	}
	return fmt.Errorf("%s doesn't accept positional arguments: %s", os.Args[0], flag.Arg(0))
}
23 vendor/github.com/aristanetworks/goarista/flag/string_array.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package flag

import "fmt"

// StringArrayOption is a type used to provide string options via command line flags.
type StringArrayOption []string

// String is the method to format the flag's value, part of the flag.Value interface.
// The String method's output will be used in diagnostics.
func (a *StringArrayOption) String() string {
	return fmt.Sprintf("%#v", *a)
}

// Set is the method to set the flag value, part of the flag.Value interface.
// Set's argument is a string to be parsed to set the flag.
func (a *StringArrayOption) Set(value string) error {
	*a = append(*a, value)
	return nil
}
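A short sketch combining StringArrayOption with CheckNoArgs from no_args.go above; the -exclude flag and the surrounding program are hypothetical:

package main

import (
	"flag"
	"fmt"
	"os"

	aflag "github.com/aristanetworks/goarista/flag"
)

func main() {
	var prefixes aflag.StringArrayOption
	// Each occurrence of -exclude appends another element to the slice.
	flag.Var(&prefixes, "exclude", "path prefix to exclude; may be repeated")
	flag.Parse()

	// Reject stray positional arguments once parsing is done.
	if err := aflag.CheckNoArgs(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("excluding:", prefixes)
}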
24 vendor/github.com/aristanetworks/goarista/flag/string_array_test.go generated vendored Normal file
@@ -0,0 +1,24 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package flag

import (
	"testing"
)

func TestStringArrayFlag(t *testing.T) {

	var excludePathPrefixes = StringArrayOption{}
	if len(excludePathPrefixes) != 0 {
		t.Fatalf("Expected length 0, saw %d", len(excludePathPrefixes))
	}

	if err := excludePathPrefixes.Set("arg1"); err != nil {
		t.Fatal(err)
	}
	if len(excludePathPrefixes) != 1 {
		t.Fatalf("Expected length 1, saw %d", len(excludePathPrefixes))
	}
}
37 vendor/github.com/aristanetworks/goarista/gnmi/client.go generated vendored
@@ -20,6 +20,7 @@ import (
	pb "github.com/openconfig/gnmi/proto/gnmi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/encoding/gzip"
	"google.golang.org/grpc/metadata"
)

@@ -29,13 +30,15 @@ const (

// Config is the gnmi.Client config
type Config struct {
	Addr     string
	CAFile   string
	CertFile string
	KeyFile  string
	Password string
	Username string
	TLS      bool
	Addr        string
	CAFile      string
	CertFile    string
	KeyFile     string
	Password    string
	Username    string
	TLS         bool
	Compression string
	DialOptions []grpc.DialOption
}

// SubscribeOptions is the gNMI subscription request options
@@ -47,11 +50,21 @@ type SubscribeOptions struct {
	SampleInterval    uint64
	HeartbeatInterval uint64
	Paths             [][]string
	Origin            string
}

// Dial connects to a gnmi service and returns a client
func Dial(cfg *Config) (pb.GNMIClient, error) {
	var opts []grpc.DialOption
	opts := append([]grpc.DialOption(nil), cfg.DialOptions...)

	switch cfg.Compression {
	case "":
	case "gzip":
		opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
	default:
		return nil, fmt.Errorf("unsupported compression option: %q", cfg.Compression)
	}

	if cfg.TLS || cfg.CAFile != "" || cfg.CertFile != "" {
		tlsConfig := &tls.Config{}
		if cfg.CAFile != "" {
@@ -126,7 +139,7 @@ func NewContext(ctx context.Context, cfg *Config) context.Context {
}

// NewGetRequest returns a GetRequest for the given paths
func NewGetRequest(paths [][]string) (*pb.GetRequest, error) {
func NewGetRequest(paths [][]string, origin string) (*pb.GetRequest, error) {
	req := &pb.GetRequest{
		Path: make([]*pb.Path, len(paths)),
	}
@@ -136,6 +149,7 @@ func NewGetRequest(paths [][]string) (*pb.GetRequest, error) {
			return nil, err
		}
		req.Path[i] = gnmiPath
		req.Path[i].Origin = origin
	}
	return req, nil
}
@@ -148,6 +162,8 @@ func NewSubscribeRequest(subscribeOptions *SubscribeOptions) (*pb.SubscribeReque
		mode = pb.SubscriptionList_ONCE
	case "poll":
		mode = pb.SubscriptionList_POLL
	case "":
		fallthrough
	case "stream":
		mode = pb.SubscriptionList_STREAM
	default:
@@ -160,6 +176,8 @@ func NewSubscribeRequest(subscribeOptions *SubscribeOptions) (*pb.SubscribeReque
		streamMode = pb.SubscriptionMode_ON_CHANGE
	case "sample":
		streamMode = pb.SubscriptionMode_SAMPLE
	case "":
		fallthrough
	case "target_defined":
		streamMode = pb.SubscriptionMode_TARGET_DEFINED
	default:
@@ -181,6 +199,7 @@ func NewSubscribeRequest(subscribeOptions *SubscribeOptions) (*pb.SubscribeReque
		if err != nil {
			return nil, err
		}
		gnmiPath.Origin = subscribeOptions.Origin
		subList.Subscription[i] = &pb.Subscription{
			Path: gnmiPath,
			Mode: streamMode,
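To illustrate the new Compression field and the extra origin parameter introduced above, a hedged caller sketch; the target address, credentials, and path are invented for the example, and error handling is reduced to panics:

package main

import (
	"context"
	"fmt"

	"github.com/aristanetworks/goarista/gnmi"
)

func main() {
	cfg := &gnmi.Config{
		Addr:        "10.0.0.1:6030",
		Username:    "admin",
		Password:    "admin",
		Compression: "gzip", // anything other than "" or "gzip" makes Dial return an error
	}
	client, err := gnmi.Dial(cfg)
	if err != nil {
		panic(err)
	}

	// NewGetRequest now takes an origin that is stamped onto every path.
	req, err := gnmi.NewGetRequest([][]string{{"interfaces", "interface"}}, "eos_native")
	if err != nil {
		panic(err)
	}
	resp, err := client.Get(gnmi.NewContext(context.Background(), cfg), req)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}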
136 vendor/github.com/aristanetworks/goarista/gnmi/operation.go generated vendored
@@ -14,20 +14,20 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/aristanetworks/glog"
	pb "github.com/openconfig/gnmi/proto/gnmi"
	"google.golang.org/grpc/codes"
)

// Get sents a GetRequest to the given client.
func Get(ctx context.Context, client pb.GNMIClient, paths [][]string) error {
	req, err := NewGetRequest(paths)
func Get(ctx context.Context, client pb.GNMIClient, paths [][]string, origin string) error {
	req, err := NewGetRequest(paths, origin)
	if err != nil {
		return err
	}
@@ -144,6 +144,8 @@ func StrVal(val *pb.TypedValue) string {
		return v.AsciiVal
	case *pb.TypedValue_AnyVal:
		return v.AnyVal.String()
	case *pb.TypedValue_ProtoBytes:
		return base64.StdEncoding.EncodeToString(v.ProtoBytes)
	default:
		panic(v)
	}
@@ -180,41 +182,137 @@ func strDecimal64(d *pb.Decimal64) string {

// strLeafList builds a human-readable form of a leaf-list. e.g. [1, 2, 3] or [a, b, c]
func strLeaflist(v *pb.ScalarArray) string {
	var buf bytes.Buffer
	buf.WriteByte('[')
	var b strings.Builder
	b.WriteByte('[')

	for i, elm := range v.Element {
		buf.WriteString(StrVal(elm))
		b.WriteString(StrVal(elm))
		if i < len(v.Element)-1 {
			buf.WriteString(", ")
			b.WriteString(", ")
		}
	}

	buf.WriteByte(']')
	return buf.String()
	b.WriteByte(']')
	return b.String()
}

func update(p *pb.Path, val string) *pb.Update {
// ExtractValue pulls a value out of a gNMI Update, parsing JSON if present.
// Possible return types:
// string
// int64
// uint64
// bool
// []byte
// float32
// *gnmi.Decimal64
// json.Number
// *any.Any
// []interface{}
// map[string]interface{}
func ExtractValue(update *pb.Update) (interface{}, error) {
	var i interface{}
	var err error
	if update == nil {
		return nil, fmt.Errorf("empty update")
	}
	if update.Val != nil {
		i, err = extractValueV04(update.Val)
	} else if update.Value != nil {
		i, err = extractValueV03(update.Value)
	}
	return i, err
}

func extractValueV04(val *pb.TypedValue) (interface{}, error) {
	switch v := val.Value.(type) {
	case *pb.TypedValue_StringVal:
		return v.StringVal, nil
	case *pb.TypedValue_IntVal:
		return v.IntVal, nil
	case *pb.TypedValue_UintVal:
		return v.UintVal, nil
	case *pb.TypedValue_BoolVal:
		return v.BoolVal, nil
	case *pb.TypedValue_BytesVal:
		return v.BytesVal, nil
	case *pb.TypedValue_FloatVal:
		return v.FloatVal, nil
	case *pb.TypedValue_DecimalVal:
		return v.DecimalVal, nil
	case *pb.TypedValue_LeaflistVal:
		elementList := v.LeaflistVal.Element
		l := make([]interface{}, len(elementList))
		for i, element := range elementList {
			el, err := extractValueV04(element)
			if err != nil {
				return nil, err
			}
			l[i] = el
		}
		return l, nil
	case *pb.TypedValue_AnyVal:
		return v.AnyVal, nil
	case *pb.TypedValue_JsonVal:
		return decode(v.JsonVal)
	case *pb.TypedValue_JsonIetfVal:
		return decode(v.JsonIetfVal)
	case *pb.TypedValue_AsciiVal:
		return v.AsciiVal, nil
	case *pb.TypedValue_ProtoBytes:
		return v.ProtoBytes, nil
	}
	return nil, fmt.Errorf("unhandled type of value %v", val.GetValue())
}

func extractValueV03(val *pb.Value) (interface{}, error) {
	switch val.Type {
	case pb.Encoding_JSON, pb.Encoding_JSON_IETF:
		return decode(val.Value)
	case pb.Encoding_BYTES, pb.Encoding_PROTO:
		return val.Value, nil
	case pb.Encoding_ASCII:
		return string(val.Value), nil
	}
	return nil, fmt.Errorf("unhandled type of value %v", val.GetValue())
}

func decode(byteArr []byte) (interface{}, error) {
	decoder := json.NewDecoder(bytes.NewReader(byteArr))
	decoder.UseNumber()
	var value interface{}
	err := decoder.Decode(&value)
	return value, err
}

// DecimalToFloat converts a gNMI Decimal64 to a float64
func DecimalToFloat(dec *pb.Decimal64) float64 {
	return float64(dec.Digits) / math.Pow10(int(dec.Precision))
}

func update(p *pb.Path, val string) (*pb.Update, error) {
	var v *pb.TypedValue
	switch p.Origin {
	case "":
		v = &pb.TypedValue{
			Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: extractJSON(val)}}
	case "eos_native":
		v = &pb.TypedValue{
			Value: &pb.TypedValue_JsonVal{JsonVal: extractJSON(val)}}
	case "cli", "test-regen-cli":
		v = &pb.TypedValue{
			Value: &pb.TypedValue_AsciiVal{AsciiVal: val}}
	case "p4_config":
		b, err := ioutil.ReadFile(val)
		if err != nil {
			glog.Fatalf("Cannot read p4 file: %s", err)
			return nil, err
		}
		v = &pb.TypedValue{
			Value: &pb.TypedValue_ProtoBytes{ProtoBytes: b}}
	default:
		panic(fmt.Errorf("unexpected origin: %q", p.Origin))
		return nil, fmt.Errorf("unexpected origin: %q", p.Origin)
	}

	return &pb.Update{Path: p, Val: v}
	return &pb.Update{Path: p, Val: v}, nil
}

// Operation describes an gNMI operation.
@@ -238,9 +336,17 @@ func newSetRequest(setOps []*Operation) (*pb.SetRequest, error) {
		case "delete":
			req.Delete = append(req.Delete, p)
		case "update":
			req.Update = append(req.Update, update(p, op.Val))
			u, err := update(p, op.Val)
			if err != nil {
				return nil, err
			}
			req.Update = append(req.Update, u)
		case "replace":
			req.Replace = append(req.Replace, update(p, op.Val))
			u, err := update(p, op.Val)
			if err != nil {
				return nil, err
			}
			req.Replace = append(req.Replace, u)
		}
	}
	return req, nil
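A small sketch of how ExtractValue and DecimalToFloat behave, assuming only what the code above shows; the JSON payload is made up:

package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/gnmi"
	pb "github.com/openconfig/gnmi/proto/gnmi"
)

func main() {
	upd := &pb.Update{Val: &pb.TypedValue{
		Value: &pb.TypedValue_JsonVal{JsonVal: []byte(`{"mtu": 9214}`)}}}

	v, err := gnmi.ExtractValue(upd)
	if err != nil {
		panic(err)
	}
	// JSON payloads come back as map[string]interface{} with json.Number values.
	fmt.Printf("%T %v\n", v, v)

	// Decimal64{Digits: -1234, Precision: 2} represents -1234 / 10^2 = -12.34.
	fmt.Println(gnmi.DecimalToFloat(&pb.Decimal64{Digits: -1234, Precision: 2}))
}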
78 vendor/github.com/aristanetworks/goarista/gnmi/operation_test.go generated vendored
@@ -6,6 +6,7 @@ package gnmi

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"os"
	"testing"
@@ -237,6 +238,11 @@ func TestStrUpdateVal(t *testing.T) {
				Value: &pb.TypedValue_AsciiVal{AsciiVal: "foobar"}}},
			exp: "foobar",
		},
		"ProtoBytes": {
			update: &pb.Update{Val: &pb.TypedValue{
				Value: &pb.TypedValue_ProtoBytes{ProtoBytes: anyBytes}}},
			exp: "CgZmb29iYXI=",
		},
	} {
		t.Run(name, func(t *testing.T) {
			got := StrUpdateVal(tc.update)
@@ -287,3 +293,75 @@ func TestExtractJSON(t *testing.T) {
		})
	}
}

func TestExtractValue(t *testing.T) {
	cases := []struct {
		in  *pb.Update
		exp interface{}
	}{{
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_StringVal{StringVal: "foo"}}},
		exp: "foo",
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_IntVal{IntVal: 123}}},
		exp: int64(123),
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_UintVal{UintVal: 123}}},
		exp: uint64(123),
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_BoolVal{BoolVal: true}}},
		exp: true,
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_BytesVal{BytesVal: []byte{0xde, 0xad}}}},
		exp: []byte{0xde, 0xad},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_FloatVal{FloatVal: -12.34}}},
		exp: float32(-12.34),
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_DecimalVal{DecimalVal: &pb.Decimal64{
				Digits: -1234, Precision: 2}}}},
		exp: &pb.Decimal64{Digits: -1234, Precision: 2},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_LeaflistVal{LeaflistVal: &pb.ScalarArray{
				Element: []*pb.TypedValue{
					&pb.TypedValue{Value: &pb.TypedValue_StringVal{StringVal: "foo"}},
					&pb.TypedValue{Value: &pb.TypedValue_IntVal{IntVal: 123}}}}}}},
		exp: []interface{}{"foo", int64(123)},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_JsonVal{JsonVal: []byte(`12.34`)}}},
		exp: json.Number("12.34"),
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_JsonVal{JsonVal: []byte(`[12.34, 123, "foo"]`)}}},
		exp: []interface{}{json.Number("12.34"), json.Number("123"), "foo"},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_JsonVal{JsonVal: []byte(`{"foo":"bar"}`)}}},
		exp: map[string]interface{}{"foo": "bar"},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_JsonVal{JsonVal: []byte(`{"foo":45.67}`)}}},
		exp: map[string]interface{}{"foo": json.Number("45.67")},
	}, {
		in: &pb.Update{Val: &pb.TypedValue{
			Value: &pb.TypedValue_JsonIetfVal{JsonIetfVal: []byte(`{"foo":"bar"}`)}}},
		exp: map[string]interface{}{"foo": "bar"},
	}}
	for _, tc := range cases {
		out, err := ExtractValue(tc.in)
		if err != nil {
			t.Errorf(err.Error())
		}
		if !test.DeepEqual(tc.exp, out) {
			t.Errorf("Extracted value is incorrect. Expected %+v, got %+v", tc.exp, out)
		}
	}
}
57 vendor/github.com/aristanetworks/goarista/gnmi/path.go generated vendored
@@ -5,7 +5,6 @@
package gnmi

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
@@ -85,10 +84,10 @@ func StrPath(path *pb.Path) string {

// strPathV04 handles the v0.4 gnmi and later path.Elem member.
func strPathV04(path *pb.Path) string {
	buf := &bytes.Buffer{}
	b := &strings.Builder{}
	for _, elm := range path.Elem {
		buf.WriteRune('/')
		writeSafeString(buf, elm.Name, '/')
		b.WriteRune('/')
		writeSafeString(b, elm.Name, '/')
		if len(elm.Key) > 0 {
			// Sort the keys so that they print in a conistent
			// order. We don't have the YANG AST information, so the
@@ -99,15 +98,15 @@ func strPathV04(path *pb.Path) string {
			}
			sort.Strings(keys)
			for _, k := range keys {
				buf.WriteRune('[')
				buf.WriteString(k)
				buf.WriteRune('=')
				writeSafeString(buf, elm.Key[k], ']')
				buf.WriteRune(']')
				b.WriteRune('[')
				b.WriteString(k)
				b.WriteRune('=')
				writeSafeString(b, elm.Key[k], ']')
				b.WriteRune(']')
			}
		}
	}
	return buf.String()
	return b.String()
}

// strPathV03 handles the v0.3 gnmi and earlier path.Element member.
@@ -115,12 +114,36 @@ func strPathV03(path *pb.Path) string {
	return "/" + strings.Join(path.Element, "/")
}

func writeSafeString(buf *bytes.Buffer, s string, esc rune) {
// upgradePath modernizes a Path by translating the contents of the Element field to Elem
func upgradePath(path *pb.Path) *pb.Path {
	if len(path.Elem) == 0 {
		var elems []*pb.PathElem
		for _, element := range path.Element {
			n, keys, _ := parseElement(element)
			elems = append(elems, &pb.PathElem{Name: n, Key: keys})
		}
		path.Elem = elems
		path.Element = nil
	}
	return path
}

// JoinPaths joins multiple gnmi paths and returns a string representation
func JoinPaths(paths ...*pb.Path) *pb.Path {
	var elems []*pb.PathElem
	for _, path := range paths {
		path = upgradePath(path)
		elems = append(elems, path.Elem...)
	}
	return &pb.Path{Elem: elems}
}

func writeSafeString(b *strings.Builder, s string, esc rune) {
	for _, c := range s {
		if c == esc || c == '\\' {
			buf.WriteRune('\\')
			b.WriteRune('\\')
		}
		buf.WriteRune(c)
		b.WriteRune(c)
	}
}

@@ -210,19 +233,19 @@ func findUnescaped(s string, find byte) (string, int) {
	}

	// Find the first match, taking care of escaped chars.
	buf := &bytes.Buffer{}
	var b strings.Builder
	var i int
	len := len(s)
	for i = 0; i < len; {
		ch := s[i]
		if ch == find {
			return buf.String(), i
			return b.String(), i
		} else if ch == '\\' && i < len-1 {
			i++
			ch = s[i]
		}
		buf.WriteByte(ch)
		b.WriteByte(ch)
		i++
	}
	return buf.String(), -1
	return b.String(), -1
}
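A brief sketch of JoinPaths; the helper names SplitPath and ParseGNMIElements are taken from the accompanying tests, and the paths are invented:

package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/gnmi"
)

func main() {
	// Build two gNMI paths from strings and join them.
	p1, _ := gnmi.ParseGNMIElements(gnmi.SplitPath("/foo/bar"))
	p2, _ := gnmi.ParseGNMIElements(gnmi.SplitPath("/baz/qux[name=eth0]"))
	joined := gnmi.JoinPaths(p1, p2)
	fmt.Println(gnmi.StrPath(joined)) // /foo/bar/baz/qux[name=eth0]
}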
56 vendor/github.com/aristanetworks/goarista/gnmi/path_test.go generated vendored
@@ -226,6 +226,62 @@ func TestParseElement(t *testing.T) {
	}
}

func strToPath(pathStr string) *pb.Path {
	splitPath := SplitPath(pathStr)
	path, _ := ParseGNMIElements(splitPath)
	path.Element = nil
	return path
}

func strsToPaths(pathStrs []string) []*pb.Path {
	var paths []*pb.Path
	for _, splitPath := range SplitPaths(pathStrs) {
		path, _ := ParseGNMIElements(splitPath)
		path.Element = nil
		paths = append(paths, path)
	}
	return paths
}

func TestJoinPath(t *testing.T) {
	cases := []struct {
		paths []*pb.Path
		exp   string
	}{{
		paths: strsToPaths([]string{"/foo/bar", "/baz/qux"}),
		exp:   "/foo/bar/baz/qux",
	},
		{
			paths: strsToPaths([]string{
				"/foo/bar[somekey=someval][otherkey=otherval]", "/baz/qux"}),
			exp: "/foo/bar[otherkey=otherval][somekey=someval]/baz/qux",
		},
		{
			paths: strsToPaths([]string{
				"/foo/bar[somekey=someval][otherkey=otherval]",
				"/baz/qux[somekey=someval][otherkey=otherval]"}),
			exp: "/foo/bar[otherkey=otherval][somekey=someval]/" +
				"baz/qux[otherkey=otherval][somekey=someval]",
		},
		{
			paths: []*pb.Path{
				&pb.Path{Element: []string{"foo", "bar[somekey=someval][otherkey=otherval]"}},
				&pb.Path{Element: []string{"baz", "qux[somekey=someval][otherkey=otherval]"}}},
			exp: "/foo/bar[somekey=someval][otherkey=otherval]/" +
				"baz/qux[somekey=someval][otherkey=otherval]",
		},
	}

	for _, tc := range cases {
		got := JoinPaths(tc.paths...)
		exp := strToPath(tc.exp)
		exp.Element = nil
		if !test.DeepEqual(got, exp) {
			t.Fatalf("ERROR!\n Got: %s,\n Want %s\n", got, exp)
		}
	}
}

func BenchmarkPathElementToSigleElementName(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, _, _ = parseElement("hello")
2 vendor/github.com/aristanetworks/goarista/influxlib/lib.go generated vendored
@@ -9,7 +9,7 @@ import (
	"fmt"
	"time"

	influxdb "github.com/influxdata/influxdb/client/v2"
	influxdb "github.com/influxdata/influxdb1-client/v2"
)

// Row is defined as a map where the key (string) is the name of the
6 vendor/github.com/aristanetworks/goarista/influxlib/testlib.go generated vendored
@@ -10,7 +10,7 @@ import (
	"fmt"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
	influx "github.com/influxdata/influxdb1-client/v2"
)

// This will serve as a fake client object to test off of.
@@ -34,6 +34,10 @@ func (w *fakeClient) Query(q influx.Query) (*influx.Response, error) {
	return &influx.Response{Results: nil, Err: ""}, nil
}

func (w *fakeClient) QueryAsChunk(q influx.Query) (*influx.ChunkedResponse, error) {
	return nil, nil
}

func (w *fakeClient) Close() error {
	return nil
}
34 vendor/github.com/aristanetworks/goarista/key/key.go generated vendored
@@ -55,10 +55,10 @@ type int16Key int16
type int32Key int32
type int64Key int64

type uint8Key int8
type uint16Key int16
type uint32Key int32
type uint64Key int64
type uint8Key uint8
type uint16Key uint16
type uint32Key uint32
type uint64Key uint64

type float32Key float32
type float64Key float64
@@ -69,6 +69,8 @@ type pointerKey compositeKey

type pathKey compositeKey

type nilKey struct{}

func pathToSlice(path Path) []interface{} {
	s := make([]interface{}, len(path))
	for i, element := range path {
@@ -98,6 +100,8 @@ func sliceToPointer(s []interface{}) pointer {
// doesn't implement value.Value.
func New(intf interface{}) Key {
	switch t := intf.(type) {
	case nil:
		return nilKey{}
	case map[string]interface{}:
		return compositeKey{sentinel: sentinel, m: t}
	case []interface{}:
@@ -566,3 +570,25 @@ func (k pathKey) Equal(other interface{}) bool {
	}
	return ok && sliceToPath(k.s).Equal(key.Key())
}

// Key interface implementation for nil
func (k nilKey) Key() interface{} {
	return nil
}

func (k nilKey) String() string {
	return "<nil>"
}

func (k nilKey) GoString() string {
	return "key.New(nil)"
}

func (k nilKey) MarshalJSON() ([]byte, error) {
	return []byte("null"), nil
}

func (k nilKey) Equal(other interface{}) bool {
	_, ok := other.(nilKey)
	return ok
}
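A hedged sketch of the new nil key support in the key package above; the map contents are invented for illustration:

package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/key"
)

func main() {
	// nil is now a valid key: New(nil) returns a nilKey that compares equal to itself.
	m := map[key.Key]interface{}{
		key.New(nil):   "no key",
		key.New("mtu"): 9214,
	}
	fmt.Println(m[key.New(nil)])      // no key
	fmt.Println(key.New(nil))         // <nil>
	fmt.Printf("%#v\n", key.New(nil)) // key.New(nil)
}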
58 vendor/github.com/aristanetworks/goarista/key/key_test.go generated vendored
@@ -42,6 +42,16 @@ func (c customKey) ToBuiltin() interface{} {
|
||||
return c.i
|
||||
}
|
||||
|
||||
var (
|
||||
nilIntf = interface{}(nil)
|
||||
nilMap = map[string]interface{}(nil)
|
||||
nilArr = []interface{}(nil)
|
||||
nilPath = Path(nil)
|
||||
|
||||
nilPtr Pointer
|
||||
nilVal value.Value
|
||||
)
|
||||
|
||||
func TestKeyEqual(t *testing.T) {
|
||||
tests := []struct {
|
||||
a Key
|
||||
@ -139,6 +149,46 @@ func TestKeyEqual(t *testing.T) {
|
||||
a: New(customKey{i: 42}),
|
||||
b: New(customKey{i: 42}),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nil),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilIntf),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilPtr),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilVal),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilMap),
|
||||
result: false,
|
||||
}, {
|
||||
a: New(nilMap),
|
||||
b: New(map[string]interface{}{}),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilArr),
|
||||
result: false,
|
||||
}, {
|
||||
a: New(nilArr),
|
||||
b: New([]interface{}{}),
|
||||
result: true,
|
||||
}, {
|
||||
a: New(nil),
|
||||
b: New(nilPath),
|
||||
result: false,
|
||||
}, {
|
||||
a: New(nilPath),
|
||||
b: New(Path{}),
|
||||
result: true,
|
||||
}}
|
||||
|
||||
for _, tcase := range tests {
|
||||
@ -162,6 +212,11 @@ func TestGetFromMap(t *testing.T) {
|
||||
v interface{}
|
||||
found bool
|
||||
}{{
|
||||
k: New(nil),
|
||||
m: map[Key]interface{}{New(nil): nil},
|
||||
v: nil,
|
||||
found: true,
|
||||
}, {
|
||||
k: New("a"),
|
||||
m: map[Key]interface{}{New("a"): "b"},
|
||||
v: "b",
|
||||
@ -451,6 +506,9 @@ func TestGoString(t *testing.T) {
|
||||
in Key
|
||||
out string
|
||||
}{{
|
||||
in: New(nil),
|
||||
out: "key.New(nil)",
|
||||
}, {
|
||||
in: New(uint8(1)),
|
||||
out: "key.New(uint8(1))",
|
||||
}, {
|
||||
|
16 vendor/github.com/aristanetworks/goarista/key/path.go generated vendored
@@ -5,8 +5,8 @@
|
||||
package key
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Path represents a path decomposed into elements where each
|
||||
@ -19,12 +19,18 @@ func (p Path) String() string {
|
||||
if len(p) == 0 {
|
||||
return "/"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
var b strings.Builder
|
||||
for _, element := range p {
|
||||
buf.WriteByte('/')
|
||||
buf.WriteString(element.String())
|
||||
b.WriteByte('/')
|
||||
// Use StringifyInterface instead of element.String() because
|
||||
// that will escape any invalid UTF-8.
|
||||
s, err := StringifyInterface(element.Key())
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("unable to stringify %#v: %s", element, err))
|
||||
}
|
||||
b.WriteString(s)
|
||||
}
|
||||
return buf.String()
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// MarshalJSON marshals a Path to JSON.
|
||||
|
13 vendor/github.com/aristanetworks/goarista/key/path_test.go generated vendored
@@ -8,6 +8,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/aristanetworks/goarista/key"
|
||||
"github.com/aristanetworks/goarista/path"
|
||||
@ -139,3 +140,15 @@ func TestPathAsKey(t *testing.T) {
|
||||
t.Errorf("customPath implementation not preserved: %T", b.Key())
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidUTF8(t *testing.T) {
|
||||
bytesAsString := string([]byte{0xFF, 0xFF, 0xFF, 0xFF})
|
||||
if utf8.ValidString(bytesAsString) {
|
||||
t.Fatalf("expected %q to be invalid utf8", bytesAsString)
|
||||
}
|
||||
p := key.Path{key.New(bytesAsString)}
|
||||
pathString := p.String()
|
||||
if !utf8.ValidString(pathString) {
|
||||
t.Errorf("expected %q to be valid utf8", pathString)
|
||||
}
|
||||
}
|
||||
|
77 vendor/github.com/aristanetworks/goarista/key/stringify.go generated vendored
@@ -6,9 +6,9 @@ package key
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
@ -21,11 +21,10 @@ import (
|
||||
// representation of their keys as their names.
|
||||
// Note: this API is deprecated and will be removed.
|
||||
func StringifyInterface(key interface{}) (string, error) {
|
||||
if key == nil {
|
||||
return "", errors.New("Unable to stringify nil")
|
||||
}
|
||||
var str string
|
||||
switch key := key.(type) {
|
||||
case nil:
|
||||
return "<nil>", nil
|
||||
case bool:
|
||||
str = strconv.FormatBool(key)
|
||||
case uint8:
|
||||
@ -106,3 +105,73 @@ func stringify(key interface{}) string {
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// StringifyCollection safely returns a string representation of a
|
||||
// map[Key]interface{} that is similar in form to the standard
|
||||
// stringification of a map, "map[k1:v1, k2:v2]". This differs from
|
||||
// StringifyInterface's handling of a map which emits a string to be
|
||||
// used as key in contexts such as JSON objects.
|
||||
func StringifyCollection(m map[Key]interface{}) string {
|
||||
type kv struct {
|
||||
key string
|
||||
val string
|
||||
}
|
||||
var length int
|
||||
kvs := make([]kv, 0, len(m))
|
||||
for k, v := range m {
|
||||
element := kv{
|
||||
key: stringifyCollectionHelper(k.Key()),
|
||||
val: stringifyCollectionHelper(v),
|
||||
}
|
||||
kvs = append(kvs, element)
|
||||
length += len(element.key) + len(element.val)
|
||||
}
|
||||
sort.Slice(kvs, func(i, j int) bool {
|
||||
return kvs[i].key < kvs[j].key
|
||||
})
|
||||
var buf strings.Builder
|
||||
buf.Grow(length + len("map[]") + 2*len(kvs) /* room for seperators: ", :" */)
|
||||
buf.WriteString("map[")
|
||||
for i, kv := range kvs {
|
||||
if i > 0 {
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
buf.WriteString(kv.key)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteString(kv.val)
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// stringifyCollectionHelper is similar to StringifyInterface, but
|
||||
// optimizes for human readability instead of making a unique string
|
||||
// key suitable for JSON.
|
||||
func stringifyCollectionHelper(val interface{}) string {
|
||||
switch val := val.(type) {
|
||||
case string:
|
||||
return escape(val)
|
||||
case map[string]interface{}:
|
||||
keys := SortedKeys(val)
|
||||
for i, k := range keys {
|
||||
v := val[k]
|
||||
s := stringifyCollectionHelper(v)
|
||||
keys[i] = k + ":" + s
|
||||
}
|
||||
return "map[" + strings.Join(keys, " ") + "]"
|
||||
case map[Key]interface{}:
|
||||
return StringifyCollection(val)
|
||||
case []interface{}:
|
||||
elements := make([]string, len(val))
|
||||
for i, element := range val {
|
||||
elements[i] = stringifyCollectionHelper(element)
|
||||
}
|
||||
return strings.Join(elements, ",")
|
||||
case Pointer:
|
||||
return "{" + val.Pointer().String() + "}"
|
||||
case Path:
|
||||
return "[" + val.String() + "]"
|
||||
}
|
||||
|
||||
return fmt.Sprint(val)
|
||||
}
|
||||
|
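A small sketch of StringifyCollection from the stringify.go change above; the map contents mirror the accompanying test's "double" case:

package main

import (
	"fmt"

	"github.com/aristanetworks/goarista/key"
)

func main() {
	m := map[key.Key]interface{}{
		key.New("foobar"): uint32(42),
		key.New("baz"):    uint32(11),
	}
	// Keys are sorted, so the output is deterministic: map[baz:11 foobar:42]
	fmt.Println(key.StringifyCollection(m))
}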
90 vendor/github.com/aristanetworks/goarista/key/stringify_go112_test.go generated vendored Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright (c) 2019 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// +build go1.12
|
||||
|
||||
// Format testing depends on map sorting during print introduced in
|
||||
// go1.12.
|
||||
|
||||
package key
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStringifyCollection(t *testing.T) {
|
||||
for name, tcase := range map[string]struct {
|
||||
input map[Key]interface{}
|
||||
output string
|
||||
}{
|
||||
"empty": {
|
||||
input: map[Key]interface{}{},
|
||||
output: "map[]",
|
||||
},
|
||||
"single": {
|
||||
input: map[Key]interface{}{
|
||||
New("foobar"): uint32(42),
|
||||
},
|
||||
output: "map[foobar:42]",
|
||||
},
|
||||
"double": {
|
||||
input: map[Key]interface{}{
|
||||
New("foobar"): uint32(42),
|
||||
New("baz"): uint32(11),
|
||||
},
|
||||
output: "map[baz:11 foobar:42]",
|
||||
},
|
||||
"map keys": {
|
||||
input: map[Key]interface{}{
|
||||
New(map[string]interface{}{"foo": uint32(1), "bar": uint32(2)}): uint32(42),
|
||||
New(map[string]interface{}{"foo": uint32(3), "bar": uint32(4)}): uint32(11),
|
||||
},
|
||||
output: "map[map[bar:2 foo:1]:42 map[bar:4 foo:3]:11]",
|
||||
},
|
||||
"string map in key map in string map in key map": {
|
||||
input: map[Key]interface{}{
|
||||
New(map[string]interface{}{"coll": map[Key]interface{}{
|
||||
New(map[string]interface{}{"one": "two"}): uint64(22),
|
||||
New(map[string]interface{}{"three": "four"}): uint64(33),
|
||||
}}): uint32(42),
|
||||
},
|
||||
output: "map[map[coll:map[map[one:two]:22 map[three:four]:33]]:42]",
|
||||
},
|
||||
"mixed types": {
|
||||
input: map[Key]interface{}{
|
||||
New(uint32(42)): true,
|
||||
New(float64(0.25)): 0.1,
|
||||
New(float32(0.5)): 0.2,
|
||||
New("foo"): "bar",
|
||||
New(map[string]interface{}{"hello": "world"}): "yolo",
|
||||
},
|
||||
output: "map[0.25:0.1 0.5:0.2 42:true foo:bar map[hello:world]:yolo]",
|
||||
}} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got := StringifyCollection(tcase.input)
|
||||
if got != tcase.output {
|
||||
t.Errorf("expected: %q\ngot: %q", tcase.output, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringifyCollectionSameAsFmt(t *testing.T) {
|
||||
keyMap := map[Key]interface{}{
|
||||
New("bar"): uint32(2),
|
||||
New("foo"): uint32(1),
|
||||
}
|
||||
strMap := map[string]interface{}{
|
||||
"bar": uint32(2),
|
||||
"foo": uint32(1),
|
||||
}
|
||||
|
||||
got := StringifyCollection(keyMap)
|
||||
exp := fmt.Sprint(strMap)
|
||||
|
||||
if got != exp {
|
||||
t.Errorf("expected Fmt formatting to match StringifyCollection: exp: %s\ngot:%s", exp, got)
|
||||
}
|
||||
}
|
4 vendor/github.com/aristanetworks/goarista/key/stringify_test.go generated vendored
@@ -17,7 +17,7 @@ func TestStringify(t *testing.T) {
	}{{
		name:   "nil",
		input:  nil,
		output: "Unable to stringify nil",
		output: "<nil>",
	}, {
		name:  "struct{}",
		input: struct{}{},
@@ -118,7 +118,7 @@ func TestStringify(t *testing.T) {
		input: map[string]interface{}{
			"n": nil,
		},
		output: "Unable to stringify nil",
		output: "<nil>",
	}, {
		name: "[]interface{}",
		input: []interface{}{
5 vendor/github.com/aristanetworks/goarista/lanz/client_test.go generated vendored
@@ -15,7 +15,6 @@ import (

	"github.com/aristanetworks/goarista/lanz"
	pb "github.com/aristanetworks/goarista/lanz/proto"
	"github.com/aristanetworks/goarista/test"

	"github.com/aristanetworks/glog"
	"github.com/golang/protobuf/proto"
@@ -158,7 +157,7 @@ func TestSuccessPath(t *testing.T) {
		if !ok {
			t.Fatalf("Unexpected closed channel")
		}
		if !test.DeepEqual(p, r) {
		if !proto.Equal(p, r) {
			t.Fatalf("Test case %d: expected %v, but got %v", i, p, r)
		}
	}
@@ -283,7 +282,7 @@ func TestDefaultConnector(t *testing.T) {
	c.Stop()
	<-done

	if !test.DeepEqual(p, testProtoBuf) {
	if !proto.Equal(p, testProtoBuf) {
		t.Fatalf("Expected protobuf %v, but got %v", testProtoBuf, p)
	}
}
414 vendor/github.com/aristanetworks/goarista/lanz/proto/lanz.pb.go generated vendored
@@ -1,28 +1,16 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: lanz.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package proto is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
lanz.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ConfigRecord
|
||||
GlobalBufferUsageRecord
|
||||
CongestionRecord
|
||||
ErrorRecord
|
||||
LanzRecord
|
||||
*/
|
||||
package proto
|
||||
|
||||
import proto1 "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto1.Marshal
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
@ -30,7 +18,7 @@ var _ = math.Inf
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type GlobalBufferUsageRecord_EntryType int32
|
||||
|
||||
@ -45,6 +33,7 @@ var GlobalBufferUsageRecord_EntryType_name = map[int32]string{
|
||||
2: "UPDATE",
|
||||
3: "HIGH",
|
||||
}
|
||||
|
||||
var GlobalBufferUsageRecord_EntryType_value = map[string]int32{
|
||||
"LOW": 1,
|
||||
"UPDATE": 2,
|
||||
@ -56,38 +45,45 @@ func (x GlobalBufferUsageRecord_EntryType) Enum() *GlobalBufferUsageRecord_Entry
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x GlobalBufferUsageRecord_EntryType) String() string {
|
||||
return proto1.EnumName(GlobalBufferUsageRecord_EntryType_name, int32(x))
|
||||
return proto.EnumName(GlobalBufferUsageRecord_EntryType_name, int32(x))
|
||||
}
|
||||
|
||||
func (x *GlobalBufferUsageRecord_EntryType) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto1.UnmarshalJSONEnum(GlobalBufferUsageRecord_EntryType_value, data, "GlobalBufferUsageRecord_EntryType")
|
||||
value, err := proto.UnmarshalJSONEnum(GlobalBufferUsageRecord_EntryType_value, data, "GlobalBufferUsageRecord_EntryType")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = GlobalBufferUsageRecord_EntryType(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (GlobalBufferUsageRecord_EntryType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{1, 0}
|
||||
return fileDescriptor_b8810f36c12659ec, []int{1, 0}
|
||||
}
|
||||
|
||||
type CongestionRecord_EntryType int32
|
||||
|
||||
const (
|
||||
CongestionRecord_START CongestionRecord_EntryType = 1
|
||||
CongestionRecord_UPDATE CongestionRecord_EntryType = 2
|
||||
CongestionRecord_END CongestionRecord_EntryType = 3
|
||||
CongestionRecord_START CongestionRecord_EntryType = 1
|
||||
CongestionRecord_UPDATE CongestionRecord_EntryType = 2
|
||||
CongestionRecord_END CongestionRecord_EntryType = 3
|
||||
CongestionRecord_POLLING CongestionRecord_EntryType = 4
|
||||
)
|
||||
|
||||
var CongestionRecord_EntryType_name = map[int32]string{
|
||||
1: "START",
|
||||
2: "UPDATE",
|
||||
3: "END",
|
||||
4: "POLLING",
|
||||
}
|
||||
|
||||
var CongestionRecord_EntryType_value = map[string]int32{
|
||||
"START": 1,
|
||||
"UPDATE": 2,
|
||||
"END": 3,
|
||||
"START": 1,
|
||||
"UPDATE": 2,
|
||||
"END": 3,
|
||||
"POLLING": 4,
|
||||
}
|
||||
|
||||
func (x CongestionRecord_EntryType) Enum() *CongestionRecord_EntryType {
|
||||
@ -95,19 +91,22 @@ func (x CongestionRecord_EntryType) Enum() *CongestionRecord_EntryType {
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x CongestionRecord_EntryType) String() string {
|
||||
return proto1.EnumName(CongestionRecord_EntryType_name, int32(x))
|
||||
return proto.EnumName(CongestionRecord_EntryType_name, int32(x))
|
||||
}
|
||||
|
||||
func (x *CongestionRecord_EntryType) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto1.UnmarshalJSONEnum(CongestionRecord_EntryType_value, data, "CongestionRecord_EntryType")
|
||||
value, err := proto.UnmarshalJSONEnum(CongestionRecord_EntryType_value, data, "CongestionRecord_EntryType")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = CongestionRecord_EntryType(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (CongestionRecord_EntryType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{2, 0}
|
||||
return fileDescriptor_b8810f36c12659ec, []int{2, 0}
|
||||
}
|
||||
|
||||
type ConfigRecord struct {
|
||||
@ -116,17 +115,40 @@ type ConfigRecord struct {
|
||||
NumOfPorts *uint32 `protobuf:"varint,3,req,name=numOfPorts" json:"numOfPorts,omitempty"`
|
||||
SegmentSize *uint32 `protobuf:"varint,4,req,name=segmentSize" json:"segmentSize,omitempty"`
|
||||
MaxQueueSize *uint32 `protobuf:"varint,5,req,name=maxQueueSize" json:"maxQueueSize,omitempty"`
|
||||
QLenInterval *uint32 `protobuf:"varint,10,opt,name=qLenInterval" json:"qLenInterval,omitempty"`
|
||||
PortConfigRecord []*ConfigRecord_PortConfigRecord `protobuf:"bytes,6,rep,name=portConfigRecord" json:"portConfigRecord,omitempty"`
|
||||
GlobalUsageHighThreshold *uint32 `protobuf:"varint,7,opt,name=globalUsageHighThreshold" json:"globalUsageHighThreshold,omitempty"`
|
||||
GlobalUsageLowThreshold *uint32 `protobuf:"varint,8,opt,name=globalUsageLowThreshold" json:"globalUsageLowThreshold,omitempty"`
|
||||
GlobalUsageReportingEnabled *bool `protobuf:"varint,9,opt,name=globalUsageReportingEnabled" json:"globalUsageReportingEnabled,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ConfigRecord) Reset() { *m = ConfigRecord{} }
|
||||
func (m *ConfigRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (*ConfigRecord) ProtoMessage() {}
|
||||
func (*ConfigRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (m *ConfigRecord) Reset() { *m = ConfigRecord{} }
|
||||
func (m *ConfigRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigRecord) ProtoMessage() {}
|
||||
func (*ConfigRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b8810f36c12659ec, []int{0}
|
||||
}
|
||||
|
||||
func (m *ConfigRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ConfigRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ConfigRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ConfigRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ConfigRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConfigRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ConfigRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_ConfigRecord.Size(m)
|
||||
}
|
||||
func (m *ConfigRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ConfigRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ConfigRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *ConfigRecord) GetTimestamp() uint64 {
|
||||
if m != nil && m.Timestamp != nil {
|
||||
@ -163,6 +185,13 @@ func (m *ConfigRecord) GetMaxQueueSize() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ConfigRecord) GetQLenInterval() uint32 {
|
||||
if m != nil && m.QLenInterval != nil {
|
||||
return *m.QLenInterval
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ConfigRecord) GetPortConfigRecord() []*ConfigRecord_PortConfigRecord {
|
||||
if m != nil {
|
||||
return m.PortConfigRecord
|
||||
@ -192,22 +221,42 @@ func (m *ConfigRecord) GetGlobalUsageReportingEnabled() bool {
|
||||
}
|
||||
|
||||
type ConfigRecord_PortConfigRecord struct {
|
||||
IntfName *string `protobuf:"bytes,1,req,name=intfName" json:"intfName,omitempty"`
|
||||
SwitchId *uint32 `protobuf:"varint,2,req,name=switchId" json:"switchId,omitempty"`
|
||||
PortId *uint32 `protobuf:"varint,3,req,name=portId" json:"portId,omitempty"`
|
||||
InternalPort *bool `protobuf:"varint,4,req,name=internalPort" json:"internalPort,omitempty"`
|
||||
HighThreshold *uint32 `protobuf:"varint,5,req,name=highThreshold" json:"highThreshold,omitempty"`
|
||||
LowThreshold *uint32 `protobuf:"varint,6,req,name=lowThreshold" json:"lowThreshold,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
IntfName *string `protobuf:"bytes,1,req,name=intfName" json:"intfName,omitempty"`
|
||||
SwitchId *uint32 `protobuf:"varint,2,req,name=switchId" json:"switchId,omitempty"`
|
||||
PortId *uint32 `protobuf:"varint,3,req,name=portId" json:"portId,omitempty"`
|
||||
InternalPort *bool `protobuf:"varint,4,req,name=internalPort" json:"internalPort,omitempty"`
|
||||
HighThreshold *uint32 `protobuf:"varint,5,req,name=highThreshold" json:"highThreshold,omitempty"`
|
||||
LowThreshold *uint32 `protobuf:"varint,6,req,name=lowThreshold" json:"lowThreshold,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ConfigRecord_PortConfigRecord) Reset() { *m = ConfigRecord_PortConfigRecord{} }
|
||||
func (m *ConfigRecord_PortConfigRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (m *ConfigRecord_PortConfigRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConfigRecord_PortConfigRecord) ProtoMessage() {}
|
||||
func (*ConfigRecord_PortConfigRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{0, 0}
|
||||
return fileDescriptor_b8810f36c12659ec, []int{0, 0}
|
||||
}
|
||||
|
||||
func (m *ConfigRecord_PortConfigRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ConfigRecord_PortConfigRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ConfigRecord_PortConfigRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ConfigRecord_PortConfigRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ConfigRecord_PortConfigRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConfigRecord_PortConfigRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ConfigRecord_PortConfigRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_ConfigRecord_PortConfigRecord.Size(m)
|
||||
}
|
||||
func (m *ConfigRecord_PortConfigRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ConfigRecord_PortConfigRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ConfigRecord_PortConfigRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *ConfigRecord_PortConfigRecord) GetIntfName() string {
|
||||
if m != nil && m.IntfName != nil {
|
||||
return *m.IntfName
|
||||
@ -251,17 +300,39 @@ func (m *ConfigRecord_PortConfigRecord) GetLowThreshold() uint32 {
|
||||
}
|
||||
|
||||
type GlobalBufferUsageRecord struct {
|
||||
EntryType *GlobalBufferUsageRecord_EntryType `protobuf:"varint,1,opt,name=entryType,enum=LanzProtobuf.GlobalBufferUsageRecord_EntryType" json:"entryType,omitempty"`
|
||||
Timestamp *uint64 `protobuf:"varint,2,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
BufferSize *uint32 `protobuf:"varint,3,opt,name=bufferSize" json:"bufferSize,omitempty"`
|
||||
Duration *uint32 `protobuf:"varint,4,opt,name=duration" json:"duration,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
EntryType *GlobalBufferUsageRecord_EntryType `protobuf:"varint,1,opt,name=entryType,enum=LanzProtobuf.GlobalBufferUsageRecord_EntryType" json:"entryType,omitempty"`
|
||||
Timestamp *uint64 `protobuf:"varint,2,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
BufferSize *uint32 `protobuf:"varint,3,opt,name=bufferSize" json:"bufferSize,omitempty"`
|
||||
Duration *uint32 `protobuf:"varint,4,opt,name=duration" json:"duration,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GlobalBufferUsageRecord) Reset() { *m = GlobalBufferUsageRecord{} }
|
||||
func (m *GlobalBufferUsageRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (*GlobalBufferUsageRecord) ProtoMessage() {}
|
||||
func (*GlobalBufferUsageRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
func (m *GlobalBufferUsageRecord) Reset() { *m = GlobalBufferUsageRecord{} }
|
||||
func (m *GlobalBufferUsageRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*GlobalBufferUsageRecord) ProtoMessage() {}
|
||||
func (*GlobalBufferUsageRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b8810f36c12659ec, []int{1}
|
||||
}
|
||||
|
||||
func (m *GlobalBufferUsageRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GlobalBufferUsageRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GlobalBufferUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GlobalBufferUsageRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GlobalBufferUsageRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GlobalBufferUsageRecord.Merge(m, src)
|
||||
}
|
||||
func (m *GlobalBufferUsageRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_GlobalBufferUsageRecord.Size(m)
|
||||
}
|
||||
func (m *GlobalBufferUsageRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GlobalBufferUsageRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GlobalBufferUsageRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *GlobalBufferUsageRecord) GetEntryType() GlobalBufferUsageRecord_EntryType {
|
||||
if m != nil && m.EntryType != nil {
|
||||
@ -292,24 +363,46 @@ func (m *GlobalBufferUsageRecord) GetDuration() uint32 {
|
||||
}
|
||||
|
||||
type CongestionRecord struct {
|
||||
Timestamp *uint64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"`
|
||||
IntfName *string `protobuf:"bytes,2,req,name=intfName" json:"intfName,omitempty"`
|
||||
SwitchId *uint32 `protobuf:"varint,3,req,name=switchId" json:"switchId,omitempty"`
|
||||
PortId *uint32 `protobuf:"varint,4,req,name=portId" json:"portId,omitempty"`
|
||||
QueueSize *uint32 `protobuf:"varint,5,req,name=queueSize" json:"queueSize,omitempty"`
|
||||
EntryType *CongestionRecord_EntryType `protobuf:"varint,6,opt,name=entryType,enum=LanzProtobuf.CongestionRecord_EntryType" json:"entryType,omitempty"`
|
||||
TrafficClass *uint32 `protobuf:"varint,7,opt,name=trafficClass" json:"trafficClass,omitempty"`
|
||||
TimeOfMaxQLen *uint64 `protobuf:"varint,8,opt,name=timeOfMaxQLen" json:"timeOfMaxQLen,omitempty"`
|
||||
TxLatency *uint32 `protobuf:"varint,9,opt,name=txLatency" json:"txLatency,omitempty"`
|
||||
QDropCount *uint32 `protobuf:"varint,10,opt,name=qDropCount" json:"qDropCount,omitempty"`
|
||||
FabricPeerIntfName *string `protobuf:"bytes,11,opt,name=fabricPeerIntfName" json:"fabricPeerIntfName,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
Timestamp *uint64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"`
|
||||
IntfName *string `protobuf:"bytes,2,req,name=intfName" json:"intfName,omitempty"`
|
||||
SwitchId *uint32 `protobuf:"varint,3,req,name=switchId" json:"switchId,omitempty"`
|
||||
PortId *uint32 `protobuf:"varint,4,req,name=portId" json:"portId,omitempty"`
|
||||
QueueSize *uint32 `protobuf:"varint,5,req,name=queueSize" json:"queueSize,omitempty"`
|
||||
EntryType *CongestionRecord_EntryType `protobuf:"varint,6,opt,name=entryType,enum=LanzProtobuf.CongestionRecord_EntryType" json:"entryType,omitempty"`
|
||||
TrafficClass *uint32 `protobuf:"varint,7,opt,name=trafficClass" json:"trafficClass,omitempty"`
|
||||
TimeOfMaxQLen *uint64 `protobuf:"varint,8,opt,name=timeOfMaxQLen" json:"timeOfMaxQLen,omitempty"`
|
||||
TxLatency *uint32 `protobuf:"varint,9,opt,name=txLatency" json:"txLatency,omitempty"`
|
||||
QDropCount *uint32 `protobuf:"varint,10,opt,name=qDropCount" json:"qDropCount,omitempty"`
|
||||
FabricPeerIntfName *string `protobuf:"bytes,11,opt,name=fabricPeerIntfName" json:"fabricPeerIntfName,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *CongestionRecord) Reset() { *m = CongestionRecord{} }
|
||||
func (m *CongestionRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (*CongestionRecord) ProtoMessage() {}
|
||||
func (*CongestionRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
func (m *CongestionRecord) Reset() { *m = CongestionRecord{} }
|
||||
func (m *CongestionRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*CongestionRecord) ProtoMessage() {}
|
||||
func (*CongestionRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b8810f36c12659ec, []int{2}
|
||||
}
|
||||
|
||||
func (m *CongestionRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CongestionRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CongestionRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CongestionRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CongestionRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CongestionRecord.Merge(m, src)
|
||||
}
|
||||
func (m *CongestionRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_CongestionRecord.Size(m)
|
||||
}
|
||||
func (m *CongestionRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CongestionRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CongestionRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *CongestionRecord) GetTimestamp() uint64 {
|
||||
if m != nil && m.Timestamp != nil {
|
||||
@ -389,15 +482,37 @@ func (m *CongestionRecord) GetFabricPeerIntfName() string {
|
||||
}
|
||||
|
||||
type ErrorRecord struct {
|
||||
Timestamp *uint64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"`
|
||||
ErrorMessage *string `protobuf:"bytes,2,req,name=errorMessage" json:"errorMessage,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
Timestamp *uint64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"`
|
||||
ErrorMessage *string `protobuf:"bytes,2,req,name=errorMessage" json:"errorMessage,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ErrorRecord) Reset() { *m = ErrorRecord{} }
|
||||
func (m *ErrorRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (*ErrorRecord) ProtoMessage() {}
|
||||
func (*ErrorRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
func (m *ErrorRecord) Reset() { *m = ErrorRecord{} }
|
||||
func (m *ErrorRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*ErrorRecord) ProtoMessage() {}
|
||||
func (*ErrorRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b8810f36c12659ec, []int{3}
|
||||
}
|
||||
|
||||
func (m *ErrorRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ErrorRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ErrorRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ErrorRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ErrorRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ErrorRecord.Merge(m, src)
|
||||
}
|
||||
func (m *ErrorRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_ErrorRecord.Size(m)
|
||||
}
|
||||
func (m *ErrorRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ErrorRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ErrorRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *ErrorRecord) GetTimestamp() uint64 {
|
||||
if m != nil && m.Timestamp != nil {
|
||||
@ -418,13 +533,35 @@ type LanzRecord struct {
|
||||
CongestionRecord *CongestionRecord `protobuf:"bytes,2,opt,name=congestionRecord" json:"congestionRecord,omitempty"`
|
||||
ErrorRecord *ErrorRecord `protobuf:"bytes,3,opt,name=errorRecord" json:"errorRecord,omitempty"`
|
||||
GlobalBufferUsageRecord *GlobalBufferUsageRecord `protobuf:"bytes,4,opt,name=globalBufferUsageRecord" json:"globalBufferUsageRecord,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LanzRecord) Reset() { *m = LanzRecord{} }
|
||||
func (m *LanzRecord) String() string { return proto1.CompactTextString(m) }
|
||||
func (*LanzRecord) ProtoMessage() {}
|
||||
func (*LanzRecord) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
func (m *LanzRecord) Reset() { *m = LanzRecord{} }
|
||||
func (m *LanzRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*LanzRecord) ProtoMessage() {}
|
||||
func (*LanzRecord) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b8810f36c12659ec, []int{4}
|
||||
}
|
||||
|
||||
func (m *LanzRecord) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LanzRecord.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LanzRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LanzRecord.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *LanzRecord) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LanzRecord.Merge(m, src)
|
||||
}
|
||||
func (m *LanzRecord) XXX_Size() int {
|
||||
return xxx_messageInfo_LanzRecord.Size(m)
|
||||
}
|
||||
func (m *LanzRecord) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LanzRecord.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LanzRecord proto.InternalMessageInfo
|
||||
|
||||
func (m *LanzRecord) GetConfigRecord() *ConfigRecord {
|
||||
if m != nil {
|
||||
@ -455,64 +592,65 @@ func (m *LanzRecord) GetGlobalBufferUsageRecord() *GlobalBufferUsageRecord {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto1.RegisterType((*ConfigRecord)(nil), "LanzProtobuf.ConfigRecord")
|
||||
proto1.RegisterType((*ConfigRecord_PortConfigRecord)(nil), "LanzProtobuf.ConfigRecord.PortConfigRecord")
|
||||
proto1.RegisterType((*GlobalBufferUsageRecord)(nil), "LanzProtobuf.GlobalBufferUsageRecord")
|
||||
proto1.RegisterType((*CongestionRecord)(nil), "LanzProtobuf.CongestionRecord")
|
||||
proto1.RegisterType((*ErrorRecord)(nil), "LanzProtobuf.ErrorRecord")
|
||||
proto1.RegisterType((*LanzRecord)(nil), "LanzProtobuf.LanzRecord")
|
||||
proto1.RegisterEnum("LanzProtobuf.GlobalBufferUsageRecord_EntryType", GlobalBufferUsageRecord_EntryType_name, GlobalBufferUsageRecord_EntryType_value)
|
||||
proto1.RegisterEnum("LanzProtobuf.CongestionRecord_EntryType", CongestionRecord_EntryType_name, CongestionRecord_EntryType_value)
|
||||
proto.RegisterEnum("LanzProtobuf.GlobalBufferUsageRecord_EntryType", GlobalBufferUsageRecord_EntryType_name, GlobalBufferUsageRecord_EntryType_value)
|
||||
proto.RegisterEnum("LanzProtobuf.CongestionRecord_EntryType", CongestionRecord_EntryType_name, CongestionRecord_EntryType_value)
|
||||
proto.RegisterType((*ConfigRecord)(nil), "LanzProtobuf.ConfigRecord")
|
||||
proto.RegisterType((*ConfigRecord_PortConfigRecord)(nil), "LanzProtobuf.ConfigRecord.PortConfigRecord")
|
||||
proto.RegisterType((*GlobalBufferUsageRecord)(nil), "LanzProtobuf.GlobalBufferUsageRecord")
|
||||
proto.RegisterType((*CongestionRecord)(nil), "LanzProtobuf.CongestionRecord")
|
||||
proto.RegisterType((*ErrorRecord)(nil), "LanzProtobuf.ErrorRecord")
|
||||
proto.RegisterType((*LanzRecord)(nil), "LanzProtobuf.LanzRecord")
|
||||
}
|
||||
|
||||
func init() { proto1.RegisterFile("lanz.proto", fileDescriptor0) }
|
||||
func init() { proto.RegisterFile("lanz.proto", fileDescriptor_b8810f36c12659ec) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 726 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x6e, 0xda, 0x4a,
|
||||
0x10, 0x96, 0x31, 0x7f, 0x1e, 0xc3, 0x91, 0xb5, 0x17, 0x27, 0x3e, 0x39, 0x51, 0x64, 0x59, 0xad,
|
||||
0x64, 0x35, 0x12, 0x95, 0xb8, 0xaa, 0x5a, 0xa9, 0x6a, 0x42, 0x68, 0x42, 0x45, 0x02, 0xd9, 0x90,
|
||||
0x46, 0xea, 0x4d, 0xb5, 0xc0, 0x1a, 0x2c, 0x99, 0x35, 0x59, 0x1b, 0xe5, 0xe7, 0x49, 0xfa, 0x24,
|
||||
0x7d, 0x89, 0xde, 0xf4, 0x49, 0xfa, 0x0c, 0xd5, 0x2e, 0x04, 0x76, 0x21, 0x44, 0xb9, 0x63, 0xbf,
|
||||
0x99, 0xf9, 0xbc, 0x33, 0xdf, 0x7c, 0x0b, 0x40, 0x4c, 0xd8, 0x43, 0x6d, 0xca, 0x93, 0x2c, 0x41,
|
||||
0x95, 0x36, 0x61, 0x0f, 0x5d, 0xf1, 0xb3, 0x3f, 0x0b, 0xfd, 0x1f, 0x05, 0xa8, 0x34, 0x12, 0x16,
|
||||
0x46, 0x23, 0x4c, 0x07, 0x09, 0x1f, 0xa2, 0x3d, 0xb0, 0xb2, 0x68, 0x42, 0xd3, 0x8c, 0x4c, 0xa6,
|
||||
0xae, 0xe1, 0xe5, 0x82, 0x3c, 0x5e, 0x01, 0xc8, 0x03, 0x5b, 0x50, 0x7d, 0xa5, 0x3c, 0x8d, 0x12,
|
||||
0xe6, 0xe6, 0xbc, 0x5c, 0x50, 0xc5, 0x2a, 0x84, 0xf6, 0x01, 0xd8, 0x6c, 0xd2, 0x09, 0xbb, 0x09,
|
||||
0xcf, 0x52, 0xd7, 0x94, 0x09, 0x0a, 0x22, 0x18, 0x52, 0x3a, 0x9a, 0x50, 0x96, 0x5d, 0x46, 0x0f,
|
||||
0xd4, 0xcd, 0xcf, 0x19, 0x14, 0x08, 0xf9, 0x50, 0x99, 0x90, 0xbb, 0x8b, 0x19, 0x9d, 0x51, 0x99,
|
||||
0x52, 0x90, 0x29, 0x1a, 0x86, 0xae, 0xc1, 0x99, 0x26, 0x3c, 0x53, 0x6f, 0xee, 0x16, 0x3d, 0x33,
|
||||
0xb0, 0xeb, 0x07, 0x35, 0xb5, 0xbf, 0x9a, 0x9a, 0x51, 0xeb, 0xae, 0x95, 0xe0, 0x0d, 0x12, 0xf4,
|
||||
0x1e, 0xdc, 0x51, 0x9c, 0xf4, 0x49, 0x7c, 0x95, 0x92, 0x11, 0x3d, 0x8d, 0x46, 0xe3, 0xde, 0x98,
|
||||
0xd3, 0x74, 0x9c, 0xc4, 0x43, 0xb7, 0xe4, 0x19, 0x41, 0x15, 0x6f, 0x8d, 0xa3, 0x77, 0xb0, 0xa3,
|
||||
0xc4, 0xda, 0xc9, 0xed, 0xaa, 0xb4, 0x2c, 0x4b, 0xb7, 0x85, 0xd1, 0x27, 0xf8, 0x5f, 0x09, 0x61,
|
||||
0x2a, 0xae, 0x15, 0xb1, 0x51, 0x93, 0x91, 0x7e, 0x4c, 0x87, 0xae, 0xe5, 0x19, 0x41, 0x19, 0x3f,
|
||||
0x97, 0xb2, 0xfb, 0xdb, 0x00, 0x67, 0xbd, 0x3d, 0xb4, 0x0b, 0xe5, 0x88, 0x65, 0xe1, 0x39, 0x99,
|
||||
0x50, 0x29, 0xa5, 0x85, 0x97, 0x67, 0x11, 0x4b, 0x6f, 0xa3, 0x6c, 0x30, 0x6e, 0x0d, 0x17, 0x32,
|
||||
0x2e, 0xcf, 0xe8, 0x5f, 0x28, 0x0a, 0xfa, 0xd6, 0x70, 0xa1, 0xdf, 0xe2, 0x24, 0x94, 0x89, 0x58,
|
||||
0x46, 0x39, 0x23, 0xb1, 0xf8, 0x96, 0x14, 0xaf, 0x8c, 0x35, 0x0c, 0xbd, 0x82, 0xea, 0x58, 0x9b,
|
||||
0xda, 0x5c, 0x3e, 0x1d, 0x14, 0x4c, 0xb1, 0x3a, 0x9f, 0xe2, 0x5c, 0x63, 0x15, 0xf3, 0xff, 0x18,
|
||||
0xb0, 0x73, 0x22, 0x5b, 0x3e, 0x9a, 0x85, 0x21, 0xe5, 0x8b, 0xc6, 0x65, 0x67, 0x67, 0x60, 0x51,
|
||||
0x96, 0xf1, 0xfb, 0xde, 0xfd, 0x54, 0xb4, 0x66, 0x04, 0xff, 0xd4, 0xdf, 0xea, 0xc2, 0x6f, 0xa9,
|
||||
0xac, 0x35, 0x1f, 0xcb, 0xf0, 0x8a, 0x41, 0x5f, 0xfa, 0x9c, 0x67, 0xe8, 0x4b, 0xbf, 0x0f, 0xd0,
|
||||
0x97, 0x3c, 0x72, 0x1d, 0x4d, 0x29, 0xa5, 0x82, 0x88, 0x51, 0x0e, 0x67, 0x9c, 0x64, 0xc2, 0x11,
|
||||
0x79, 0x19, 0x5d, 0x9e, 0xfd, 0x37, 0x60, 0x2d, 0xbf, 0x88, 0x4a, 0x60, 0xb6, 0x3b, 0xd7, 0x8e,
|
||||
0x81, 0x00, 0x8a, 0x57, 0xdd, 0xe3, 0xc3, 0x5e, 0xd3, 0xc9, 0xa1, 0x32, 0xe4, 0x4f, 0x5b, 0x27,
|
||||
0xa7, 0x8e, 0xe9, 0xff, 0x32, 0xc1, 0x69, 0x24, 0x6c, 0x44, 0x53, 0x51, 0xfa, 0x22, 0x3f, 0xaa,
|
||||
0x0a, 0xe7, 0x9e, 0x51, 0xd8, 0xdc, 0xaa, 0x70, 0x5e, 0x53, 0x78, 0x0f, 0xac, 0x9b, 0x35, 0xe3,
|
||||
0xad, 0x00, 0xf4, 0x59, 0x9d, 0x7a, 0x51, 0x4e, 0x3d, 0xd8, 0xb0, 0x9b, 0x76, 0xfd, 0xa7, 0xc7,
|
||||
0xed, 0x43, 0x25, 0xe3, 0x24, 0x0c, 0xa3, 0x41, 0x23, 0x26, 0x69, 0xba, 0x30, 0x96, 0x86, 0x89,
|
||||
0x3d, 0x12, 0x6d, 0x76, 0xc2, 0x33, 0x72, 0x77, 0xd1, 0xa6, 0x4c, 0x5a, 0x28, 0x8f, 0x75, 0x50,
|
||||
0x4e, 0xe7, 0xae, 0x4d, 0x32, 0xca, 0x06, 0xf7, 0xd2, 0x26, 0x55, 0xbc, 0x02, 0x84, 0x70, 0x37,
|
||||
0xc7, 0x3c, 0x99, 0x36, 0x92, 0x19, 0xcb, 0x5c, 0x98, 0x0b, 0xb7, 0x42, 0x50, 0x0d, 0x50, 0x48,
|
||||
0xfa, 0x3c, 0x1a, 0x74, 0x29, 0xe5, 0xad, 0xc7, 0x39, 0xda, 0x9e, 0x11, 0x58, 0xf8, 0x89, 0x88,
|
||||
0x7f, 0xa0, 0x8a, 0x69, 0x41, 0xe1, 0xb2, 0x77, 0x88, 0x7b, 0x6b, 0x72, 0x96, 0xc0, 0x6c, 0x9e,
|
||||
0x1f, 0x3b, 0xa6, 0xdf, 0x01, 0xbb, 0xc9, 0x79, 0xc2, 0x5f, 0xa4, 0xa3, 0x0f, 0x15, 0x2a, 0x92,
|
||||
0xcf, 0x68, 0x2a, 0x76, 0x75, 0xa1, 0xa5, 0x86, 0xf9, 0x3f, 0x73, 0x00, 0x62, 0xd8, 0x0b, 0xc2,
|
||||
0x8f, 0x50, 0x19, 0xa8, 0xcf, 0x9f, 0x70, 0x81, 0x5d, 0xdf, 0xdd, 0xfe, 0xfc, 0x61, 0x2d, 0x1f,
|
||||
0x7d, 0x01, 0x67, 0xb0, 0xa6, 0x96, 0x5c, 0x7d, 0xbb, 0xbe, 0xff, 0xbc, 0xa6, 0x78, 0xa3, 0x0e,
|
||||
0x7d, 0x00, 0x9b, 0xae, 0x7a, 0x95, 0x16, 0xb1, 0xeb, 0xff, 0xe9, 0x34, 0xca, 0x30, 0xb0, 0x9a,
|
||||
0x8d, 0xbe, 0x3f, 0x3e, 0x9b, 0x1b, 0x66, 0x95, 0x6e, 0xb2, 0xeb, 0xaf, 0x5f, 0xe4, 0x6c, 0xbc,
|
||||
0x8d, 0xe5, 0xa8, 0xf4, 0xad, 0x20, 0xff, 0xfa, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xcf,
|
||||
0xcf, 0x51, 0x07, 0x07, 0x00, 0x00,
|
||||
var fileDescriptor_b8810f36c12659ec = []byte{
|
||||
// 750 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x4f, 0xdb, 0x4a,
|
||||
0x14, 0x95, 0xe3, 0x7c, 0xf9, 0x3a, 0x3c, 0x59, 0xb3, 0x78, 0xf8, 0xf1, 0x10, 0xb2, 0xac, 0xf7,
|
||||
0xa4, 0xa8, 0x95, 0x52, 0x29, 0x2b, 0xd4, 0x4a, 0x55, 0x21, 0xa4, 0x90, 0xca, 0x90, 0x30, 0x84,
|
||||
0x22, 0x75, 0x53, 0x4d, 0x92, 0x71, 0x62, 0xc9, 0x19, 0x87, 0xb1, 0x53, 0x3e, 0x7e, 0x58, 0x57,
|
||||
0xfd, 0x11, 0x5d, 0xf6, 0x57, 0xf4, 0x37, 0x54, 0x33, 0x98, 0x64, 0x26, 0x21, 0x88, 0x1d, 0x73,
|
||||
0xee, 0xbd, 0x27, 0xbe, 0xe7, 0xcc, 0x19, 0x00, 0x62, 0xc2, 0xee, 0x1b, 0x33, 0x9e, 0x64, 0x09,
|
||||
0xaa, 0x05, 0x84, 0xdd, 0xf7, 0xc4, 0x9f, 0x83, 0x79, 0xe8, 0xff, 0x28, 0x41, 0xad, 0x95, 0xb0,
|
||||
0x30, 0x1a, 0x63, 0x3a, 0x4c, 0xf8, 0x08, 0xed, 0x82, 0x95, 0x45, 0x53, 0x9a, 0x66, 0x64, 0x3a,
|
||||
0x73, 0x0d, 0xaf, 0x50, 0x2f, 0xe2, 0x25, 0x80, 0x3c, 0xb0, 0x05, 0xd5, 0x67, 0xca, 0xd3, 0x28,
|
||||
0x61, 0x6e, 0xc1, 0x2b, 0xd4, 0xb7, 0xb0, 0x0a, 0xa1, 0x3d, 0x00, 0x36, 0x9f, 0x76, 0xc3, 0x5e,
|
||||
0xc2, 0xb3, 0xd4, 0x35, 0x65, 0x83, 0x82, 0x08, 0x86, 0x94, 0x8e, 0xa7, 0x94, 0x65, 0x17, 0xd1,
|
||||
0x3d, 0x75, 0x8b, 0x0f, 0x0c, 0x0a, 0x84, 0x7c, 0xa8, 0x4d, 0xc9, 0xed, 0xf9, 0x9c, 0xce, 0xa9,
|
||||
0x6c, 0x29, 0xc9, 0x16, 0x0d, 0x13, 0x3d, 0xd7, 0x01, 0x65, 0x1d, 0x96, 0x51, 0xfe, 0x8d, 0xc4,
|
||||
0x2e, 0x78, 0x86, 0xe8, 0x51, 0x31, 0x74, 0x05, 0xce, 0x2c, 0xe1, 0x99, 0xba, 0x9d, 0x5b, 0xf6,
|
||||
0xcc, 0xba, 0xdd, 0x7c, 0xdd, 0x50, 0x35, 0x68, 0xa8, 0x1d, 0x8d, 0xde, 0xca, 0x08, 0x5e, 0x23,
|
||||
0x41, 0x6f, 0xc1, 0x1d, 0xc7, 0xc9, 0x80, 0xc4, 0x97, 0x29, 0x19, 0xd3, 0x93, 0x68, 0x3c, 0xe9,
|
||||
0x4f, 0x38, 0x4d, 0x27, 0x49, 0x3c, 0x72, 0x2b, 0xf2, 0x43, 0x36, 0xd6, 0xd1, 0x3e, 0x6c, 0x2b,
|
||||
0xb5, 0x20, 0xb9, 0x59, 0x8e, 0x56, 0xe5, 0xe8, 0xa6, 0x32, 0xfa, 0x00, 0xff, 0x2a, 0x25, 0x4c,
|
||||
0xc5, 0x67, 0x45, 0x6c, 0xdc, 0x66, 0x64, 0x10, 0xd3, 0x91, 0x6b, 0x79, 0x46, 0xbd, 0x8a, 0x9f,
|
||||
0x6b, 0xd9, 0xf9, 0x69, 0x80, 0xb3, 0xba, 0x1e, 0xda, 0x81, 0x6a, 0xc4, 0xb2, 0xf0, 0x8c, 0x4c,
|
||||
0xa9, 0xb4, 0xdb, 0xc2, 0x8b, 0xb3, 0xa8, 0xa5, 0x37, 0x51, 0x36, 0x9c, 0x74, 0x46, 0xb9, 0xd5,
|
||||
0x8b, 0x33, 0xfa, 0x1b, 0xca, 0x82, 0xbe, 0x33, 0xca, 0x3d, 0xce, 0x4f, 0xc2, 0x99, 0x48, 0x38,
|
||||
0xc0, 0x48, 0x2c, 0x7e, 0x4b, 0x1a, 0x5c, 0xc5, 0x1a, 0x86, 0xfe, 0x83, 0xad, 0x89, 0xa6, 0xda,
|
||||
0x83, 0xc5, 0x3a, 0x28, 0x98, 0x62, 0x55, 0x9f, 0xf2, 0xc3, 0x3d, 0x50, 0x31, 0xff, 0xb7, 0x01,
|
||||
0xdb, 0xc7, 0x72, 0xe5, 0xc3, 0x79, 0x18, 0x52, 0x9e, 0x2f, 0x2e, 0x37, 0x3b, 0x05, 0x8b, 0xb2,
|
||||
0x8c, 0xdf, 0xf5, 0xef, 0x66, 0x62, 0x35, 0xa3, 0xfe, 0x57, 0xf3, 0x8d, 0x6e, 0xfc, 0x86, 0xc9,
|
||||
0x46, 0xfb, 0x71, 0x0c, 0x2f, 0x19, 0xf4, 0x60, 0x14, 0x3c, 0x43, 0x0f, 0xc6, 0x1e, 0xc0, 0x40,
|
||||
0xf2, 0xc8, 0x2b, 0x6b, 0x4a, 0x2b, 0x15, 0x44, 0x48, 0x39, 0x9a, 0x73, 0x92, 0x89, 0xd4, 0x14,
|
||||
0x65, 0x75, 0x71, 0xf6, 0x5f, 0x81, 0xb5, 0xf8, 0x45, 0x54, 0x01, 0x33, 0xe8, 0x5e, 0x39, 0x06,
|
||||
0x02, 0x28, 0x5f, 0xf6, 0x8e, 0x0e, 0xfa, 0x6d, 0xa7, 0x80, 0xaa, 0x50, 0x3c, 0xe9, 0x1c, 0x9f,
|
||||
0x38, 0xa6, 0xff, 0xcb, 0x04, 0xa7, 0x95, 0xb0, 0x31, 0x4d, 0xc5, 0xe8, 0x8b, 0x32, 0xab, 0x3a,
|
||||
0x5c, 0x78, 0xc6, 0x61, 0x73, 0xa3, 0xc3, 0x45, 0xcd, 0xe1, 0x5d, 0xb0, 0xae, 0x57, 0xc2, 0xb9,
|
||||
0x04, 0xd0, 0x47, 0x55, 0xf5, 0xb2, 0x54, 0xbd, 0xbe, 0x16, 0x37, 0xed, 0xf3, 0x9f, 0x96, 0xdb,
|
||||
0x87, 0x5a, 0xc6, 0x49, 0x18, 0x46, 0xc3, 0x56, 0x4c, 0xd2, 0x34, 0x0f, 0x96, 0x86, 0x89, 0x7b,
|
||||
0x24, 0xd6, 0xec, 0x86, 0xa7, 0xe4, 0xf6, 0x3c, 0xa0, 0x4c, 0x46, 0xa8, 0x88, 0x75, 0x50, 0xaa,
|
||||
0x73, 0x1b, 0x90, 0x8c, 0xb2, 0xe1, 0x9d, 0x8c, 0xc9, 0x16, 0x5e, 0x02, 0xc2, 0xb8, 0xeb, 0x23,
|
||||
0x9e, 0xcc, 0x5a, 0xc9, 0x9c, 0x65, 0xf9, 0x3b, 0xa2, 0x20, 0xa8, 0x01, 0x28, 0x24, 0x03, 0x1e,
|
||||
0x0d, 0x7b, 0x94, 0xf2, 0xce, 0xa3, 0x8e, 0xb6, 0x67, 0xd4, 0x2d, 0xfc, 0x44, 0xc5, 0xdf, 0x57,
|
||||
0xcd, 0xb4, 0xa0, 0x74, 0xd1, 0x3f, 0xc0, 0xfd, 0x15, 0x3b, 0x2b, 0x60, 0xb6, 0xcf, 0x8e, 0x1c,
|
||||
0x13, 0xd9, 0x50, 0xe9, 0x75, 0x83, 0xa0, 0x73, 0x76, 0xec, 0x14, 0xfd, 0x2e, 0xd8, 0x6d, 0xce,
|
||||
0x13, 0xfe, 0x22, 0x53, 0x7d, 0xa8, 0x51, 0xd1, 0x7c, 0x4a, 0x53, 0x71, 0x71, 0x73, 0x63, 0x35,
|
||||
0xcc, 0xff, 0x5e, 0x00, 0x10, 0xca, 0xe7, 0x84, 0xef, 0xa1, 0x36, 0x54, 0xdf, 0x42, 0x11, 0x09,
|
||||
0xbb, 0xb9, 0xb3, 0xf9, 0x2d, 0xc4, 0x5a, 0x3f, 0xfa, 0x04, 0xce, 0x70, 0xc5, 0x3a, 0x99, 0x03,
|
||||
0xbb, 0xb9, 0xf7, 0xbc, 0xc1, 0x78, 0x6d, 0x0e, 0xbd, 0x03, 0x9b, 0x2e, 0x77, 0x95, 0x79, 0xb1,
|
||||
0x9b, 0xff, 0xe8, 0x34, 0x8a, 0x18, 0x58, 0xed, 0x46, 0x5f, 0x1f, 0xdf, 0xd0, 0xb5, 0xe4, 0xca,
|
||||
0x68, 0xd9, 0xcd, 0xff, 0x5f, 0x14, 0x73, 0xbc, 0x89, 0xe5, 0xb0, 0xf2, 0xa5, 0x24, 0xff, 0x57,
|
||||
0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x01, 0xb9, 0xd3, 0x0b, 0x38, 0x07, 0x00, 0x00,
|
||||
}
|
||||
|
2
vendor/github.com/aristanetworks/goarista/lanz/proto/lanz.proto
generated
vendored
@ -18,6 +18,7 @@ message ConfigRecord {
required uint32 numOfPorts = 3; // Num of ports in the switch
required uint32 segmentSize = 4; // Segement size
required uint32 maxQueueSize = 5; // Maximum queue size in segments
optional uint32 qLenInterval = 10; // Frequency of update
message PortConfigRecord {
required string intfName = 1; // Name of the port
required uint32 switchId = 2; // Id of the chip on a multi-chip system
@ -54,6 +55,7 @@ message CongestionRecord {
START = 1;
UPDATE = 2;
END = 3;
POLLING = 4;
}
optional EntryType entryType = 6; // Type of entry
optional uint32 trafficClass = 7; // Traffic Class
4
vendor/github.com/aristanetworks/goarista/monitor/map.go
generated
vendored
@ -11,9 +11,9 @@
package monitor

import (
"bytes"
"expvar"
"fmt"
"strings"
"sync"
)

@ -25,7 +25,7 @@ type Map struct {
}

func (v *Map) String() string {
var b bytes.Buffer
var b strings.Builder
b.WriteByte('{')
first := true
v.m.Range(func(k, value interface{}) bool {
17
vendor/github.com/aristanetworks/goarista/monitor/server.go
generated
vendored
@ -21,6 +21,7 @@ import (
// Server represents a monitoring server
type Server interface {
Run(serveMux *http.ServeMux)
Serve(serveMux *http.ServeMux) error
}

// server contains information for the monitoring server
@ -72,8 +73,15 @@ func histogramHandler(w http.ResponseWriter, r *http.Request) {
})
}

// Run sets up the HTTP server and any handlers
// Run calls Serve. On error the program exits.
func (s *server) Run(serveMux *http.ServeMux) {
if err := s.Serve(serveMux); err != nil {
glog.Fatal(err)
}
}

// Serve registers handlers and starts serving.
func (s *server) Serve(serveMux *http.ServeMux) error {
serveMux.HandleFunc("/debug", debugHandler)
serveMux.HandleFunc("/debug/histograms", histogramHandler)

@ -84,11 +92,8 @@ func (s *server) Run(serveMux *http.ServeMux) {
return err
})
if err != nil {
glog.Fatalf("Could not start monitor server in VRF %q: %s", s.vrfName, err)
return fmt.Errorf("could not start monitor server in VRF %q: %s", s.vrfName, err)
}

err = http.Serve(listener, serveMux)
if err != nil {
glog.Fatal("http serve returned with error:", err)
}
return http.Serve(listener, serveMux)
}
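Note (illustrative sketch, not part of the vendored diff): this revision adds Serve to the monitor Server interface and turns Run into a thin wrapper that exits on error, so a caller that wants to handle startup failures itself can call Serve directly. A minimal sketch against the interface shown above; the caller function name is hypothetical:

package example

import (
	"log"
	"net/http"

	"github.com/aristanetworks/goarista/monitor"
)

// serveMonitor prefers the error-returning Serve over Run (which calls glog.Fatal).
func serveMonitor(s monitor.Server) {
	mux := http.NewServeMux()
	if err := s.Serve(mux); err != nil {
		log.Printf("monitor server stopped: %v", err)
	}
}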
12
vendor/github.com/aristanetworks/goarista/monitor/stats/histogram.go
generated
vendored
@ -104,15 +104,15 @@ func (v HistogramValue) MarshalJSON() ([]byte, error) {
percentMulti = 100 / float64(v.Count)
}
fmt.Fprintf(&b,
`{"stats":{"count":%d,"min":%d,"max":%d,"avg":%.2f}, "buckets": {`,
`{"stats":{"count":%d,"min":%d,"max":%d,"avg":%.2f}, "buckets": [`,
v.Count, min, max, avg)

for i, bucket := range v.Buckets {
fmt.Fprintf(&b, `"[%d,`, bucket.LowBound)
fmt.Fprintf(&b, `{"range":"[%d,`, bucket.LowBound)
if i+1 < len(v.Buckets) {
fmt.Fprintf(&b, `%d)":{`, v.Buckets[i+1].LowBound)
fmt.Fprintf(&b, `%d)",`, v.Buckets[i+1].LowBound)
} else {
fmt.Fprintf(&b, `inf)":{`)
fmt.Fprintf(&b, `inf)",`)
}

fmt.Fprintf(&b, `"count":%d,"percentage":%.1f}`,
@ -121,13 +121,13 @@ func (v HistogramValue) MarshalJSON() ([]byte, error) {
fmt.Fprintf(&b, ",")
}
}
fmt.Fprint(&b, `}}`)
fmt.Fprint(&b, `]}`)
return b.Bytes(), nil
}

// String returns the textual output of the histogram values as string.
func (v HistogramValue) String() string {
var b bytes.Buffer
var b strings.Builder
v.PrintChart(&b)
return b.String()
}
153
vendor/github.com/aristanetworks/goarista/monitor/stats/histogram_test.go
generated
vendored
@ -5,24 +5,155 @@
|
||||
package stats_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/aristanetworks/goarista/monitor/stats"
|
||||
)
|
||||
|
||||
func testJSON(h *stats.Histogram) error {
|
||||
js, err := h.Value().MarshalJSON()
|
||||
var expected0 = []byte(`{
|
||||
"stats": {
|
||||
"count": 0,
|
||||
"min": 0,
|
||||
"max": 0,
|
||||
"avg": 0.00
|
||||
},
|
||||
"buckets": [
|
||||
{
|
||||
"range": "[0,20)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[20,40)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[40,60)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[60,80)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[80,100)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[100,120)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[120,140)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[140,160)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[160,180)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[180,inf)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
}
|
||||
]
|
||||
}
|
||||
`)
|
||||
|
||||
var expected42 = []byte(`{
|
||||
"stats": {
|
||||
"count": 1,
|
||||
"min": 42,
|
||||
"max": 42,
|
||||
"avg": 42.00
|
||||
},
|
||||
"buckets": [
|
||||
{
|
||||
"range": "[0,20)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[20,40)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[40,60)",
|
||||
"count": 1,
|
||||
"percentage": 100.0
|
||||
},
|
||||
{
|
||||
"range": "[60,80)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[80,100)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[100,120)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[120,140)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[140,160)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[160,180)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
},
|
||||
{
|
||||
"range": "[180,inf)",
|
||||
"count": 0,
|
||||
"percentage": 0.0
|
||||
}
|
||||
]
|
||||
}
|
||||
`)
|
||||
|
||||
func testJSON(t *testing.T, h *stats.Histogram, exp []byte) {
|
||||
var buf bytes.Buffer
|
||||
enc := json.NewEncoder(&buf)
|
||||
enc.SetIndent("", " ")
|
||||
err := enc.Encode(h.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), exp) {
|
||||
t.Error("unexpected json")
|
||||
t.Errorf("Expected: %s", exp)
|
||||
t.Errorf("Got: %s", buf.Bytes())
|
||||
}
|
||||
var v interface{}
|
||||
err = json.Unmarshal(js, &v)
|
||||
err = json.Unmarshal(buf.Bytes(), &v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse JSON: %s\nJSON was: %s", err, js)
|
||||
t.Errorf("Failed to parse JSON: %s\nJSON was: %s", err, buf.Bytes())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure we can JSONify the histogram into valid JSON.
|
||||
@ -30,7 +161,9 @@ func TestJSON(t *testing.T) {
|
||||
h := stats.NewHistogram(
|
||||
stats.HistogramOptions{NumBuckets: 10, GrowthFactor: 0,
|
||||
SmallestBucketSize: 20, MinValue: 0})
|
||||
testJSON(h)
|
||||
h.Add(42)
|
||||
testJSON(h)
|
||||
testJSON(t, h, expected0)
|
||||
if err := h.Add(42); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testJSON(t, h, expected42)
|
||||
}
|
||||
|
49
vendor/github.com/aristanetworks/goarista/netns/netns.go
generated
vendored
@ -8,6 +8,8 @@ package netns

import (
"fmt"
"os"
"runtime"
)

const (
@ -43,3 +45,50 @@ func setNsByName(nsName string) error {
}
return nil
}

// Do takes a function which it will call in the network namespace specified by nsName.
// The goroutine that calls this will lock itself to its current OS thread, hop
// namespaces, call the given function, hop back to its original namespace, and then
// unlock itself from its current OS thread.
// Do returns an error if an error occurs at any point besides in the invocation of
// the given function, or if the given function itself returns an error.
//
// The callback function is expected to do something simple such as just
// creating a socket / opening a connection, as it's not desirable to start
// complex logic in a goroutine that is pinned to the current OS thread.
// Also any goroutine started from the callback function may or may not
// execute in the desired namespace.
func Do(nsName string, cb Callback) error {
// If destNS is empty, the function is called in the caller's namespace
if nsName == "" {
return cb()
}

// Get the file descriptor to the current namespace
currNsFd, err := getNs(selfNsFile)
if os.IsNotExist(err) {
return fmt.Errorf("File descriptor to current namespace does not exist: %s", err)
} else if err != nil {
return fmt.Errorf("Failed to open %s: %s", selfNsFile, err)
}
defer currNsFd.close()

runtime.LockOSThread()
defer runtime.UnlockOSThread()

// Jump to the new network namespace
if err := setNsByName(nsName); err != nil {
return fmt.Errorf("Failed to set the namespace to %s: %s", nsName, err)
}

// Call the given function
cbErr := cb()

// Come back to the original namespace
if err = setNs(currNsFd); err != nil {
return fmt.Errorf("Failed to return to the original namespace: %s (callback returned %v)",
err, cbErr)
}

return cbErr
}
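Note (illustrative sketch, not part of the vendored diff): Do pins the calling goroutine to its OS thread, enters the named namespace, runs the callback, and switches back. A minimal usage sketch; the helper function and address are hypothetical, and it assumes the package's Callback type is func() error as the code above implies:

package example

import (
	"net"

	"github.com/aristanetworks/goarista/netns"
)

// dialInNamespace opens a TCP connection from inside the given network namespace.
// Keep the callback small: it runs while the goroutine is locked to an OS thread.
func dialInNamespace(nsName, addr string) (net.Conn, error) {
	var conn net.Conn
	err := netns.Do(nsName, func() error {
		var dialErr error
		conn, dialErr = net.Dial("tcp", addr)
		return dialErr
	})
	return conn, err
}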
60
vendor/github.com/aristanetworks/goarista/netns/netns_110.go
generated
vendored
@ -1,60 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package netns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Do takes a function which it will call in the network namespace specified by nsName.
|
||||
// The goroutine that calls this will lock itself to its current OS thread, hop
|
||||
// namespaces, call the given function, hop back to its original namespace, and then
|
||||
// unlock itself from its current OS thread.
|
||||
// Do returns an error if an error occurs at any point besides in the invocation of
|
||||
// the given function, or if the given function itself returns an error.
|
||||
//
|
||||
// The callback function is expected to do something simple such as just
|
||||
// creating a socket / opening a connection, as it's not desirable to start
|
||||
// complex logic in a goroutine that is pinned to the current OS thread.
|
||||
// Also any goroutine started from the callback function may or may not
|
||||
// execute in the desired namespace.
|
||||
func Do(nsName string, cb Callback) error {
|
||||
// If destNS is empty, the function is called in the caller's namespace
|
||||
if nsName == "" {
|
||||
return cb()
|
||||
}
|
||||
|
||||
// Get the file descriptor to the current namespace
|
||||
currNsFd, err := getNs(selfNsFile)
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf("File descriptor to current namespace does not exist: %s", err)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Failed to open %s: %s", selfNsFile, err)
|
||||
}
|
||||
defer currNsFd.close()
|
||||
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
// Jump to the new network namespace
|
||||
if err := setNsByName(nsName); err != nil {
|
||||
return fmt.Errorf("Failed to set the namespace to %s: %s", nsName, err)
|
||||
}
|
||||
|
||||
// Call the given function
|
||||
cbErr := cb()
|
||||
|
||||
// Come back to the original namespace
|
||||
if err = setNs(currNsFd); err != nil {
|
||||
return fmt.Errorf("Failed to return to the original namespace: %s (callback returned %v)",
|
||||
err, cbErr)
|
||||
}
|
||||
|
||||
return cbErr
|
||||
}
|
@ -2,6 +2,8 @@
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// +build !linux

package netns

// stub: close closes the file descriptor mapped to a network namespace
64
vendor/github.com/aristanetworks/goarista/netns/netns_pre110.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
// Copyright (c) 2016 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package netns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Do takes a function which it will call in the network namespace specified by nsName.
|
||||
// The goroutine that calls this will lock itself to its current OS thread, hop
|
||||
// namespaces, call the given function, hop back to its original namespace, and then
|
||||
// unlock itself from its current OS thread.
|
||||
// Do returns an error if an error occurs at any point besides in the invocation of
|
||||
// the given function, or if the given function itself returns an error.
|
||||
//
|
||||
// The callback function is expected to do something simple such as just
|
||||
// creating a socket / opening a connection, and you should not kick off any
|
||||
// complex logic from the callback or call any complicated code or create any
|
||||
// new goroutine from the callback. The callback should not panic or use defer.
|
||||
// The behavior of this function is undefined if the callback doesn't conform
|
||||
// these demands.
|
||||
//go:nosplit
|
||||
func Do(nsName string, cb Callback) error {
|
||||
// If destNS is empty, the function is called in the caller's namespace
|
||||
if nsName == "" {
|
||||
return cb()
|
||||
}
|
||||
|
||||
// Get the file descriptor to the current namespace
|
||||
currNsFd, err := getNs(selfNsFile)
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf("File descriptor to current namespace does not exist: %s", err)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("Failed to open %s: %s", selfNsFile, err)
|
||||
}
|
||||
|
||||
runtime.LockOSThread()
|
||||
|
||||
// Jump to the new network namespace
|
||||
if err := setNsByName(nsName); err != nil {
|
||||
runtime.UnlockOSThread()
|
||||
currNsFd.close()
|
||||
return fmt.Errorf("Failed to set the namespace to %s: %s", nsName, err)
|
||||
}
|
||||
|
||||
// Call the given function
|
||||
cbErr := cb()
|
||||
|
||||
// Come back to the original namespace
|
||||
if err = setNs(currNsFd); err != nil {
|
||||
cbErr = fmt.Errorf("Failed to return to the original namespace: %s (callback returned %v)",
|
||||
err, cbErr)
|
||||
}
|
||||
|
||||
runtime.UnlockOSThread()
|
||||
currNsFd.close()
|
||||
return cbErr
|
||||
}
|
213
vendor/github.com/aristanetworks/goarista/netns/nslistener.go
generated
vendored
Normal file
@ -0,0 +1,213 @@
|
||||
// Copyright (c) 2019 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package netns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aristanetworks/fsnotify"
|
||||
"github.com/aristanetworks/glog"
|
||||
"github.com/aristanetworks/goarista/dscp"
|
||||
)
|
||||
|
||||
var makeListener = func(nsName string, addr *net.TCPAddr, tos byte) (net.Listener, error) {
|
||||
var listener net.Listener
|
||||
err := Do(nsName, func() error {
|
||||
var err error
|
||||
listener, err = dscp.ListenTCPWithTOS(addr, tos)
|
||||
return err
|
||||
})
|
||||
return listener, err
|
||||
}
|
||||
|
||||
func accept(listener net.Listener, conns chan<- net.Conn) {
|
||||
for {
|
||||
c, err := listener.Accept()
|
||||
if err != nil {
|
||||
glog.Infof("Accept error: %v", err)
|
||||
return
|
||||
}
|
||||
conns <- c
|
||||
}
|
||||
}
|
||||
|
||||
func waitForMount(mountPoint string) bool {
|
||||
for !hasMount(mountPoint) {
|
||||
time.Sleep(time.Second)
|
||||
if _, err := os.Stat(mountPoint); err != nil {
|
||||
glog.Infof("error stating %s: %v", mountPoint, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// nsListener is a net.Listener that binds to a specific network namespace when it becomes available
|
||||
// and in case it gets deleted and recreated it will automatically bind to the newly created
|
||||
// namespace.
|
||||
type nsListener struct {
|
||||
listener net.Listener
|
||||
watcher *fsnotify.Watcher
|
||||
nsName string
|
||||
nsFile string
|
||||
addr *net.TCPAddr
|
||||
tos byte
|
||||
done chan struct{}
|
||||
conns chan net.Conn
|
||||
}
|
||||
|
||||
func (l *nsListener) tearDown() {
|
||||
if l.listener != nil {
|
||||
glog.Info("Destroying listener")
|
||||
l.listener.Close()
|
||||
l.listener = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (l *nsListener) setUp() bool {
|
||||
glog.Infof("Creating listener in namespace %v", l.nsName)
|
||||
if err := l.watcher.Add(l.nsFile); err != nil {
|
||||
glog.Infof("Can't watch the file (will try again): %v", err)
|
||||
return false
|
||||
}
|
||||
listener, err := makeListener(l.nsName, l.addr, l.tos)
|
||||
if err != nil {
|
||||
glog.Infof("Can't create TCP listener (will try again): %v", err)
|
||||
return false
|
||||
}
|
||||
l.listener = listener
|
||||
go accept(l.listener, l.conns)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (l *nsListener) watch() {
|
||||
var mounted bool
|
||||
if hasMount(l.nsFile) {
|
||||
mounted = l.setUp()
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-l.done:
|
||||
l.tearDown()
|
||||
go func() {
|
||||
// Drain the events, otherwise closing the watcher will get stuck
|
||||
for range l.watcher.Events {
|
||||
}
|
||||
}()
|
||||
l.watcher.Close()
|
||||
close(l.conns)
|
||||
return
|
||||
case ev := <-l.watcher.Events:
|
||||
if ev.Name != l.nsFile {
|
||||
continue
|
||||
}
|
||||
if ev.Op&fsnotify.Create == fsnotify.Create {
|
||||
if mounted || !waitForMount(l.nsFile) {
|
||||
continue
|
||||
}
|
||||
mounted = l.setUp()
|
||||
}
|
||||
if ev.Op&fsnotify.Remove == fsnotify.Remove {
|
||||
l.tearDown()
|
||||
mounted = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *nsListener) setupWatch() error {
|
||||
w, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.Add(filepath.Dir(l.nsFile)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.watcher = w
|
||||
go l.watch()
|
||||
return nil
|
||||
}
|
||||
|
||||
func newNSListenerWithDir(nsDir, nsName string, addr *net.TCPAddr, tos byte) (net.Listener, error) {
|
||||
l := &nsListener{
|
||||
nsName: nsName,
|
||||
nsFile: filepath.Join(nsDir, nsName),
|
||||
addr: addr,
|
||||
tos: tos,
|
||||
done: make(chan struct{}),
|
||||
conns: make(chan net.Conn),
|
||||
}
|
||||
if err := l.setupWatch(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Accept accepts a connection on the listener socket.
|
||||
func (l *nsListener) Accept() (net.Conn, error) {
|
||||
if c, ok := <-l.conns; ok {
|
||||
return c, nil
|
||||
}
|
||||
return nil, errors.New("listener closed")
|
||||
}
|
||||
|
||||
// Close closes the listener.
|
||||
func (l *nsListener) Close() error {
|
||||
close(l.done)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Addr returns the local address of the listener.
|
||||
func (l *nsListener) Addr() net.Addr {
|
||||
return l.addr
|
||||
}
|
||||
|
||||
func hasMountInProcMounts(r io.Reader, mountPoint string) bool {
|
||||
// Kernels up to 3.18 export the namespace via procfs and later ones via nsfs
|
||||
fsTypes := map[string]bool{"proc": true, "nsfs": true}
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
l := scanner.Text()
|
||||
comps := strings.SplitN(l, " ", 3)
|
||||
if len(comps) != 3 || !fsTypes[comps[0]] {
|
||||
continue
|
||||
}
|
||||
if comps[1] == mountPoint {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getNsDirFromProcMounts(r io.Reader) (string, error) {
|
||||
// Newer EOS versions mount netns under /run
|
||||
dirs := map[string]bool{"/var/run/netns": true, "/run/netns": true}
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
l := scanner.Text()
|
||||
comps := strings.SplitN(l, " ", 3)
|
||||
if len(comps) != 3 || !dirs[comps[1]] {
|
||||
continue
|
||||
}
|
||||
return comps[1], nil
|
||||
}
|
||||
|
||||
return "", errors.New("can't find the netns mount dir")
|
||||
}
|
49
vendor/github.com/aristanetworks/goarista/netns/nslistener_linux.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package netns

import (
"errors"
"net"
"os"

"github.com/aristanetworks/glog"
)

var hasMount = func(mountPoint string) bool {
fd, err := os.Open("/proc/mounts")
if err != nil {
glog.Fatalf("can't open /proc/mounts")
}
defer fd.Close()

return hasMountInProcMounts(fd, mountPoint)
}

func getNsDir() (string, error) {
fd, err := os.Open("/proc/mounts")
if err != nil {
return "", errors.New("can't open /proc/mounts")
}
defer fd.Close()

return getNsDirFromProcMounts(fd)
}

// NewNSListener creates a new net.Listener bound to a network namespace. The listening socket will
// be bound to the specified local address and will have the specified tos.
func NewNSListener(nsName string, addr *net.TCPAddr, tos byte) (net.Listener, error) {
// The default namespace doesn't get recreated and avoid the watcher helps with environments
// that aren't setup for multiple namespaces (eg inside containers)
if nsName == "" || nsName == "default" {
return makeListener(nsName, addr, tos)
}
nsDir, err := getNsDir()
if err != nil {
return nil, err
}

return newNSListenerWithDir(nsDir, nsName, addr, tos)
}
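Note (illustrative sketch, not part of the vendored diff): NewNSListener returns a listener that binds inside the named namespace and, via the watcher in nslistener.go above, rebinds if that namespace is deleted and recreated. A minimal usage sketch; the namespace name and port are placeholders:

package example

import (
	"net"

	"github.com/aristanetworks/goarista/netns"
)

// listenInNS listens on 127.0.0.1:6042 inside the hypothetical "ns-mgmt" namespace.
// A tos of 0 leaves the TOS/DSCP byte at its default.
func listenInNS() (net.Listener, error) {
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 6042}
	return netns.NewNSListener("ns-mgmt", addr, 0)
}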
19
vendor/github.com/aristanetworks/goarista/netns/nslistener_other.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

// +build !linux

package netns

import "net"

var hasMount = func(_ string) bool {
return true
}

// NewNSListener creates a new net.Listener bound to a network namespace. The listening socket will
// be bound to the specified local address and will have the specified tos.
func NewNSListener(nsName string, addr *net.TCPAddr, tos byte) (net.Listener, error) {
return makeListener(nsName, addr, tos)
}
439
vendor/github.com/aristanetworks/goarista/netns/nslistener_test.go
generated
vendored
Normal file
@ -0,0 +1,439 @@
|
||||
// Copyright (c) 2019 Arista Networks, Inc.
|
||||
// Use of this source code is governed by the Apache License 2.0
|
||||
// that can be found in the COPYING file.
|
||||
|
||||
package netns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type mockListener struct {
|
||||
makes int
|
||||
accepts int
|
||||
closes int
|
||||
maxAccepts int
|
||||
stop chan struct{}
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func (l *mockListener) Accept() (net.Conn, error) {
|
||||
if l.accepts >= l.maxAccepts {
|
||||
<-l.stop
|
||||
return nil, errors.New("closed")
|
||||
}
|
||||
l.accepts++
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (l *mockListener) Close() error {
|
||||
l.closes++
|
||||
close(l.stop)
|
||||
close(l.done)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *mockListener) Addr() net.Addr {
|
||||
return nil
|
||||
}
|
||||
|
||||
var currentMockListener struct {
|
||||
mu sync.RWMutex
|
||||
listener *mockListener
|
||||
}
|
||||
|
||||
func makeMockListener(n int) func(string, *net.TCPAddr, byte) (net.Listener, error) {
|
||||
return func(_ string, _ *net.TCPAddr, _ byte) (net.Listener, error) {
|
||||
currentMockListener.mu.Lock()
|
||||
defer currentMockListener.mu.Unlock()
|
||||
currentMockListener.listener = &mockListener{
|
||||
maxAccepts: n,
|
||||
stop: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
currentMockListener.listener.makes++
|
||||
return currentMockListener.listener, nil
|
||||
}
|
||||
}
|
||||
|
||||
func TestNSListener(t *testing.T) {
|
||||
makeListener = makeMockListener(1)
|
||||
hasMount = func(_ string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
nsDir, err := ioutil.TempDir("", "netns")
|
||||
if err != nil {
|
||||
t.Fatalf("Can't create temp file: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(nsDir)
|
||||
|
||||
l, err := newNSListenerWithDir(nsDir, "ns-yolo", nil, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't create mock listener: %v", err)
|
||||
}
|
||||
|
||||
var listener *mockListener
|
||||
nsFile := filepath.Join(nsDir, "ns-yolo")
|
||||
for i := 1; i <= 3; i++ {
|
||||
if err = ioutil.WriteFile(nsFile, []byte{}, os.FileMode(0777)); err != nil {
|
||||
t.Fatalf("Can't create ns file: %v", err)
|
||||
}
|
||||
if _, err = l.Accept(); err != nil {
|
||||
t.Fatalf("Unexpected accept error: %v", err)
|
||||
}
|
||||
|
||||
currentMockListener.mu.RLock()
|
||||
if listener == currentMockListener.listener {
|
||||
t.Fatalf("%v: listener hasn't changed", i)
|
||||
}
|
||||
listener = currentMockListener.listener
|
||||
currentMockListener.mu.RUnlock()
|
||||
|
||||
os.Remove(nsFile)
|
||||
<-listener.done
|
||||
|
||||
if listener.makes != 1 {
|
||||
t.Fatalf("%v: Expected makeListener to be called once, but it was called %v times", i,
|
||||
listener.makes)
|
||||
}
|
||||
if listener.accepts != 1 {
|
||||
t.Fatalf("%v: Expected accept to be called once, but it was called %v times", i,
|
||||
listener.accepts)
|
||||
}
|
||||
if listener.closes != 1 {
|
||||
t.Fatalf("%v: Expected close to be called once, but it was called %v times", i,
|
||||
listener.closes)
|
||||
}
|
||||
}
|
||||
|
||||
l.Close()
|
||||
}
|
||||
|
||||
func TestNSListenerClose(t *testing.T) {
|
||||
makeListener = makeMockListener(0)
|
||||
hasMount = func(_ string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
nsDir, err := ioutil.TempDir("", "netns")
|
||||
if err != nil {
|
||||
t.Fatalf("Can't create temp file: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(nsDir)
|
||||
|
||||
l, err := newNSListenerWithDir(nsDir, "ns-yolo", nil, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't create mock listener: %v", err)
|
||||
}
|
||||
|
||||
nsFile := filepath.Join(nsDir, "ns-yolo")
|
||||
if err = ioutil.WriteFile(nsFile, []byte{}, os.FileMode(0777)); err != nil {
|
||||
t.Fatalf("Can't create ns file: %v", err)
|
||||
}
|
||||
defer os.Remove(nsFile)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
l.Accept()
|
||||
close(done)
|
||||
}()
|
||||
l.Close()
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestHasMount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
desc: "Mounted as nsfs",
|
||||
input: `
|
||||
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
|
||||
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
|
||||
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
|
||||
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
|
||||
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
|
||||
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
|
||||
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
|
||||
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
|
||||
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
|
||||
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
|
||||
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
|
||||
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
|
||||
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
|
||||
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
|
||||
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
|
||||
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
|
||||
configfs /sys/kernel/config configfs rw,relatime 0 0
|
||||
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
|
||||
tmpfs /tmp tmpfs rw,size=593256k 0 0
|
||||
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
|
||||
mqueue /dev/mqueue mqueue rw,relatime 0 0
|
||||
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /.deltas/var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
|
||||
/monitor /monitor debugfs rw,relatime 0 0
|
||||
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
|
||||
nsfs /var/run/netns/default nsfs rw 0 0
|
||||
nsfs /.deltas/var/run/netns/default nsfs rw 0 0
|
||||
nsfs /var/run/netns/ns-OOB-Management nsfs rw 0 0
|
||||
nsfs /.deltas/var/run/netns/ns-OOB-Management nsfs rw 0 0
|
||||
`,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Mounted as proc",
|
||||
input: `
|
||||
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
|
||||
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
|
||||
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
|
||||
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
|
||||
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
|
||||
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
|
||||
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
|
||||
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
|
||||
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
|
||||
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
|
||||
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
|
||||
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
|
||||
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
|
||||
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
|
||||
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
|
||||
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
|
||||
configfs /sys/kernel/config configfs rw,relatime 0 0
|
||||
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
|
||||
tmpfs /tmp tmpfs rw,size=593256k 0 0
|
||||
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
|
||||
mqueue /dev/mqueue mqueue rw,relatime 0 0
|
||||
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /.deltas/var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
|
||||
/monitor /monitor debugfs rw,relatime 0 0
|
||||
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
|
||||
proc /var/run/netns/default proc rw 0 0
|
||||
proc /.deltas/var/run/netns/default proc rw 0 0
|
||||
proc /var/run/netns/ns-OOB-Management proc rw 0 0
|
||||
proc /.deltas/var/run/netns/ns-OOB-Management proc rw 0 0
|
||||
`,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: "Not mounted",
|
||||
input: `
|
||||
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
|
||||
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
|
||||
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
|
||||
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
|
||||
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
|
||||
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
|
||||
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
|
||||
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
|
||||
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
|
||||
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
|
||||
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
|
||||
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
|
||||
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
|
||||
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
|
||||
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
|
||||
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
|
||||
configfs /sys/kernel/config configfs rw,relatime 0 0
|
||||
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
|
||||
tmpfs /tmp tmpfs rw,size=593256k 0 0
|
||||
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
|
||||
mqueue /dev/mqueue mqueue rw,relatime 0 0
|
||||
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /.deltas/var/run/netns tmpfs rw,relatime 0 0
|
||||
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
|
||||
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
|
||||
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
|
||||
/monitor /monitor debugfs rw,relatime 0 0
|
||||
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
|
||||
proc /var/run/netns/default proc rw 0 0
|
||||
proc /.deltas/var/run/netns/default proc rw 0 0
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
rdr := strings.NewReader(tc.input)
|
||||
if r := hasMountInProcMounts(rdr, "/var/run/netns/ns-OOB-Management"); r != tc.expected {
|
||||
t.Errorf("%v: unexpected result %v, expected %v", tc.desc, r, tc.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNsDir(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
input string
|
||||
expected string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
desc: "Mounted in /var/run/netns",
|
||||
input: `
|
||||
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
|
||||
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
|
||||
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
|
||||
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
|
||||
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
|
||||
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
|
||||
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
|
||||
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
|
||||
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
|
||||
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
|
||||
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
|
||||
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
|
||||
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
|
||||
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
|
||||
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
|
||||
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
|
||||
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
|
||||
configfs /sys/kernel/config configfs rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
tmpfs /tmp tmpfs rw,size=593256k 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/run/netns tmpfs rw,relatime 0 0
tmpfs /.deltas/var/run/netns tmpfs rw,relatime 0 0
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
/monitor /monitor debugfs rw,relatime 0 0
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
nsfs /var/run/netns/default nsfs rw 0 0
nsfs /.deltas/var/run/netns/default nsfs rw 0 0
nsfs /var/run/netns/ns-OOB-Management nsfs rw 0 0
nsfs /.deltas/var/run/netns/ns-OOB-Management nsfs rw 0 0
`,
expected: "/var/run/netns",
},
{
desc: "Mounted in /run/netns",
input: `
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
tmpfs /tmp tmpfs rw,size=593256k 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
tmpfs /run/netns tmpfs rw,relatime 0 0
tmpfs /.deltas/run/netns tmpfs rw,relatime 0 0
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
/monitor /monitor debugfs rw,relatime 0 0
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
nsfs /run/netns/default nsfs rw 0 0
nsfs /.deltas/run/netns/default nsfs rw 0 0
nsfs /run/netns/ns-OOB-Management nsfs rw 0 0
nsfs /.deltas/run/netns/ns-OOB-Management nsfs rw 0 0
`,
expected: "/run/netns",
},
{
desc: "Not mounted",
input: `
none / aufs rw,relatime,si=7aaed56e5ecd215c 0 0
none /.overlay tmpfs rw,relatime,size=593256k,mode=755,idr=enabled 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,size=8192k,nr_inodes=485215,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs rw,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,name=systemd 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/net_cls cgroup rw,nosuid,nodev,noexec,relatime,net_cls 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,relatime 0 0
tmpfs /tmp tmpfs rw,size=593256k 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0
mqueue /dev/mqueue mqueue rw,relatime 0 0
tmpfs /.deltas tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/run tmpfs rw,relatime,size=65536k 0 0
tmpfs /.deltas/run/netns tmpfs rw,relatime 0 0
tmpfs /var/tmp tmpfs rw,relatime,size=65536k 0 0
tmpfs /var/core tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/log tmpfs rw,relatime,size=395504k 0 0
tmpfs /var/shmem tmpfs rw,relatime,size=988756k 0 0
/monitor /monitor debugfs rw,relatime 0 0
/dev/sda1 /mnt/flash vfat rw,dirsync,noatime,gid=88,fmask=0007,dmask=0007,allow_utime=0020 0 0
`,
err: "can't find the netns mount",
},
}

for _, tc := range testCases {
r, err := getNsDirFromProcMounts(strings.NewReader(tc.input))
if err != nil {
if tc.err == "" || !strings.Contains(err.Error(), tc.err) {
t.Errorf("%v: unexpected error %v", tc.desc, err)
continue
}
}
if r != tc.expected {
t.Errorf("%v: expected %v, got %v", tc.desc, tc.expected, r)
}
}
}
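The helper under test, getNsDirFromProcMounts, is not itself shown in this diff. For orientation only, here is a minimal sketch that is consistent with the test cases above (scan /proc/mounts-style input and return the first mount point that is one of the recognized netns directories); the actual vendored implementation may differ, and the package name below is an assumption.

package netns // assumed package name; only the test body appears in this diff

import (
	"bufio"
	"errors"
	"io"
	"strings"
)

// getNsDirFromProcMounts returns the directory where network namespaces are
// bind-mounted, based on mount-table input such as /proc/mounts. Sketch only.
func getNsDirFromProcMounts(r io.Reader) (string, error) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 2 {
			continue
		}
		// The second field of each mount entry is the mount point.
		switch fields[1] {
		case "/run/netns", "/var/run/netns":
			return fields[1], nil
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	return "", errors.New("can't find the netns mount")
}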
2 vendor/github.com/aristanetworks/goarista/openconfig/client/client.go generated vendored
@ -38,7 +38,7 @@ func New(username, password, addr string, opts []grpc.DialOption) *Client {
}
// Make sure we don't move past the grpc.Dial() call until we actually
// established an HTTP/2 connection successfully.
opts = append(opts, grpc.WithBlock(), grpc.WithWaitForHandshake())
opts = append(opts, grpc.WithBlock())
conn, err := grpc.Dial(addr, opts...)
if err != nil {
glog.Fatalf("Failed to dial: %s", err)
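The dropped grpc.WithWaitForHandshake() option was deprecated upstream (waiting for the handshake became the default behavior), so grpc.WithBlock() alone still keeps Dial from returning before a connection is established. A blocking dial is usually paired with a deadline; the sketch below is illustrative only (dialBlocking and the 10-second timeout are assumptions, not part of the vendored code).

// dialBlocking is an illustrative helper: it bounds a blocking dial with a
// context timeout so an unreachable server cannot hang the caller forever.
// Assumes the standard "context", "time", and "google.golang.org/grpc" imports.
func dialBlocking(addr string, opts []grpc.DialOption) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	opts = append(opts, grpc.WithBlock())
	return grpc.DialContext(ctx, addr, opts...)
}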
33 vendor/github.com/aristanetworks/goarista/openconfig/client/flags.go generated vendored
@ -9,6 +9,7 @@ import (
"crypto/x509"
"flag"
"io/ioutil"
"os"
"strings"

"github.com/aristanetworks/glog"
@ -16,6 +17,29 @@ import (
"google.golang.org/grpc/credentials"
)

const (
// HostnameArg is the value to be replaced by the actual hostname
HostnameArg = "HOSTNAME"
)

// ParseHostnames parses a comma-separated list of names and replaces HOSTNAME with the current
// hostname in it
func ParseHostnames(list string) ([]string, error) {
items := strings.Split(list, ",")
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
names := make([]string, len(items))
for i, name := range items {
if name == HostnameArg {
name = hostname
}
names[i] = name
}
return names, nil
}

// ParseFlags registers some additional common flags,
// parses the flags, and returns the resulting gRPC options,
// and other settings to connect to the gRPC interface.
@ -24,7 +48,8 @@ func ParseFlags() (username string, password string, subscriptions, addrs []stri

var (
addrsFlag = flag.String("addrs", "localhost:6030",
"Comma-separated list of addresses of OpenConfig gRPC servers")
"Comma-separated list of addresses of OpenConfig gRPC servers. The address 'HOSTNAME' "+
"is replaced by the current hostname.")

caFileFlag = flag.String("cafile", "",
"Path to server TLS certificate file")
@ -78,7 +103,11 @@ func ParseFlags() (username string, password string, subscriptions, addrs []stri
} else {
opts = append(opts, grpc.WithInsecure())
}
addrs = strings.Split(*addrsFlag, ",")
var err error
addrs, err = ParseHostnames(*addrsFlag)
if err != nil {
glog.Fatal(err)
}
subscriptions = strings.Split(*subscribeFlag, ",")
return *usernameFlag, *passwordFlag, subscriptions, addrs, opts
}
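A brief usage sketch of the new ParseHostnames helper (the hostname "sw1" is assumed for illustration). Note that only a list item that is exactly HOSTNAME is replaced; an item such as "HOSTNAME:6030" would pass through unchanged.

// Illustrative only; assumes os.Hostname() returns "sw1" and that this runs
// in a package importing "fmt" and "github.com/aristanetworks/glog".
func exampleParseHostnames() {
	addrs, err := ParseHostnames("HOSTNAME,10.0.0.2:6030")
	if err != nil {
		glog.Fatal(err)
	}
	fmt.Println(addrs) // prints [sw1 10.0.0.2:6030]
}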
110 vendor/github.com/aristanetworks/goarista/path/map.go generated vendored
@ -5,9 +5,9 @@
package path

import (
"bytes"
"fmt"
"sort"
"strings"

"github.com/aristanetworks/goarista/key"
)
@ -33,32 +33,6 @@ type VisitorFunc func(v interface{}) error
// general case, time complexity is linear with respect
// to the length of p but it can be as bad as O(2^len(p))
// if there are a lot of paths with wildcards registered.
//
// Example:
//
// a := path.New("foo", "bar", "baz")
// b := path.New("foo", path.Wildcard, "baz")
// c := path.New(path.Wildcard, "bar", "baz")
// d := path.New("foo", "bar", path.Wildcard)
// e := path.New(path.Wildcard, path.Wildcard, "baz")
// f := path.New(path.Wildcard, "bar", path.Wildcard)
// g := path.New("foo", path.Wildcard, path.Wildcard)
// h := path.New(path.Wildcard, path.Wildcard, path.Wildcard)
//
// m.Set(a, 1)
// m.Set(b, 2)
// m.Set(c, 3)
// m.Set(d, 4)
// m.Set(e, 5)
// m.Set(f, 6)
// m.Set(g, 7)
// m.Set(h, 8)
//
// p := path.New("foo", "bar", "baz")
//
// m.Visit(p, fn)
//
// Result: fn(1), fn(2), fn(3), fn(4), fn(5), fn(6), fn(7) and fn(8)
func (m *Map) Visit(p key.Path, fn VisitorFunc) error {
for i, element := range p {
if m.wildcard != nil {
@ -80,26 +54,6 @@ func (m *Map) Visit(p key.Path, fn VisitorFunc) error {

// VisitPrefixes calls a function fn for every value in the
// Map that is registered with a prefix of a path p.
//
// Example:
//
// a := path.New()
// b := path.New("foo")
// c := path.New("foo", "bar")
// d := path.New("foo", "baz")
// e := path.New(path.Wildcard, "bar")
//
// m.Set(a, 1)
// m.Set(b, 2)
// m.Set(c, 3)
// m.Set(d, 4)
// m.Set(e, 5)
//
// p := path.New("foo", "bar", "baz")
//
// m.VisitPrefixes(p, fn)
//
// Result: fn(1), fn(2), fn(3), fn(5)
func (m *Map) VisitPrefixes(p key.Path, fn VisitorFunc) error {
for i, element := range p {
if m.ok {
@ -128,24 +82,6 @@ func (m *Map) VisitPrefixes(p key.Path, fn VisitorFunc) error {
// registerd with a path that is prefixed by p. This method
// can be used to visit every registered path if p is the
// empty path (or root path) which prefixes all paths.
//
// Example:
//
// a := path.New("foo")
// b := path.New("foo", "bar")
// c := path.New("foo", "bar", "baz")
// d := path.New("foo", path.Wildcard)
//
// m.Set(a, 1)
// m.Set(b, 2)
// m.Set(c, 3)
// m.Set(d, 4)
//
// p := path.New("foo", "bar")
//
// m.VisitPrefixed(p, fn)
//
// Result: fn(2), fn(3), fn(4)
func (m *Map) VisitPrefixed(p key.Path, fn VisitorFunc) error {
for i, element := range p {
if m.wildcard != nil {
@ -181,21 +117,15 @@ func (m *Map) visitSubtree(fn VisitorFunc) error {
return nil
}

// IsEmpty returns true if no paths have been registered, false otherwise.
func (m *Map) IsEmpty() bool {
return m.wildcard == nil && len(m.children) == 0 && !m.ok
}

// Get returns the value registered with an exact match of a
// path p. If there is no exact match for p, Get returns nil
// and false. If p has an exact match and it is set to true,
// Get returns nil and true.
//
// Example:
//
// m.Set(path.New("foo", "bar"), 1)
// m.Set(path.New("baz", "qux"), nil)
//
// a := m.Get(path.New("foo", "bar"))
// b := m.Get(path.New("foo", path.Wildcard))
// c, ok := m.Get(path.New("baz", "qux"))
//
// Result: a == 1, b == nil, c == nil and ok == true
func (m *Map) Get(p key.Path) (interface{}, bool) {
for _, element := range p {
if element.Equal(Wildcard) {
@ -215,18 +145,7 @@ func (m *Map) Get(p key.Path) (interface{}, bool) {
}

// Set registers a path p with a value. If the path was already
// registered with a value it returns true and false otherwise.
//
// Example:
//
// p := path.New("foo", "bar")
//
// a := m.Set(p, 0)
// b := m.Set(p, 1)
//
// v := m.Get(p)
//
// Result: a == false, b == true and v == 1
// registered with a value it returns false and true otherwise.
func (m *Map) Set(p key.Path, v interface{}) bool {
for _, element := range p {
if element.Equal(Wildcard) {
@ -253,17 +172,6 @@ func (m *Map) Set(p key.Path, v interface{}) bool {

// Delete unregisters the value registered with a path. It
// returns true if a value was deleted and false otherwise.
//
// Example:
//
// p := path.New("foo", "bar")
//
// m.Set(p, 0)
//
// a := m.Delete(p)
// b := m.Delete(p)
//
// Result: a == true and b == false
func (m *Map) Delete(p key.Path) bool {
maps := make([]*Map, len(p)+1)
for i, element := range p {
@ -303,12 +211,12 @@ func (m *Map) Delete(p key.Path) bool {
}

func (m *Map) String() string {
var b bytes.Buffer
var b strings.Builder
m.write(&b, "")
return b.String()
}

func (m *Map) write(b *bytes.Buffer, indent string) {
func (m *Map) write(b *strings.Builder, indent string) {
if m.ok {
b.WriteString(indent)
fmt.Fprintf(b, "Val: %v", m.val)
41 vendor/github.com/aristanetworks/goarista/path/map_test.go generated vendored
@ -20,6 +20,47 @@ func accumulator(counter map[int]int) VisitorFunc {
}
}

func TestIsEmpty(t *testing.T) {
m := Map{}

if !m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return true; Got false")
}

nonWildcardPath := key.Path{key.New("foo")}
wildcardPath := key.Path{Wildcard, key.New("bar"), key.New("baz")}

m.Set(nonWildcardPath, 0)
if m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return false; Got true")
}

m.Set(wildcardPath, 2)
if m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return false; Got true")
}

m.Delete(nonWildcardPath)
if m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return false; Got true")
}

m.Delete(wildcardPath)
if !m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return true; Got false")
}

m.Set(nil, nil)
if m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return false; Got true")
}

m.Delete(nil)
if !m.IsEmpty() {
t.Errorf("Expected IsEmpty() to return true; Got false")
}
}

func TestMapSet(t *testing.T) {
m := Map{}
a := m.Set(key.Path{key.New("foo")}, 0)
125 vendor/github.com/aristanetworks/goarista/path/mapexample_test.go generated vendored Normal file
@ -0,0 +1,125 @@
// Copyright (c) 2019 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package path_test

import (
"fmt"
"sort"

"github.com/aristanetworks/goarista/path"
)

func ExampleMap_Visit() {
var m path.Map
m.Set(path.New("foo", "bar", "baz"), 1)
m.Set(path.New("foo", path.Wildcard, "baz"), 2)
m.Set(path.New(path.Wildcard, "bar", "baz"), 3)
m.Set(path.New("foo", "bar", path.Wildcard), 4)
m.Set(path.New(path.Wildcard, path.Wildcard, "baz"), 5)
m.Set(path.New(path.Wildcard, "bar", path.Wildcard), 6)
m.Set(path.New("foo", path.Wildcard, path.Wildcard), 7)
m.Set(path.New(path.Wildcard, path.Wildcard, path.Wildcard), 8)

p := path.New("foo", "bar", "baz")

var nums []int
m.Visit(p, func(v interface{}) error {
nums = append(nums, v.(int))
return nil
})
sort.Ints(nums)
fmt.Println(nums)

// Output: [1 2 3 4 5 6 7 8]
}

func ExampleMap_VisitPrefixes() {
var m path.Map
m.Set(path.New(), 1)
m.Set(path.New("foo"), 2)
m.Set(path.New("foo", "bar"), 3)
m.Set(path.New("foo", "baz"), 4)
m.Set(path.New(path.Wildcard, "bar"), 5)

p := path.New("foo", "bar", "baz")

var nums []int
m.VisitPrefixes(p, func(v interface{}) error {
nums = append(nums, v.(int))
return nil
})
sort.Ints(nums)
fmt.Println(nums)

// Output: [1 2 3 5]
}

func ExampleMap_VisitPrefixed() {
var m path.Map
m.Set(path.New("foo"), 1)
m.Set(path.New("foo", "bar"), 2)
m.Set(path.New("foo", "bar", "baz"), 3)
m.Set(path.New("foo", path.Wildcard), 4)

p := path.New("foo", "bar")

var nums []int
m.VisitPrefixed(p, func(v interface{}) error {
nums = append(nums, v.(int))
return nil
})
sort.Ints(nums)
fmt.Println(nums)

// Ouput: [2 3 4]
}

func ExampleMap_Get() {
var m path.Map
m.Set(path.New("foo", "bar"), 1)
m.Set(path.New("baz", "qux"), nil)

a, ok := m.Get(path.New("foo", "bar"))
fmt.Printf("a = %v, ok = %t\n", a, ok)
b, ok := m.Get(path.New("foo", path.Wildcard))
fmt.Printf("b = %v, ok = %t\n", b, ok)
c, ok := m.Get(path.New("baz", "qux"))
fmt.Printf("c = %v, ok = %t\n", c, ok)

// Output:
// a = 1, ok = true
// b = <nil>, ok = false
// c = <nil>, ok = true
}

func ExampleMap_Set() {
var m path.Map
p := path.New("foo", "bar")

fmt.Println(m.Set(p, 0))
fmt.Println(m.Set(p, 1))

a, ok := m.Get(p)
fmt.Printf("a = %v, ok = %t\n", a, ok)

// Output:
// true
// false
// a = 1, ok = true
}

func ExampleMap_Delete() {
var m path.Map
p := path.New("foo", "bar")

m.Set(p, 0)

fmt.Println(m.Delete(p))
fmt.Println(m.Delete(p))

// Output:
// true
// false
}
7 vendor/github.com/aristanetworks/goarista/sizeof/sizeof.go generated vendored
@ -1,7 +1,6 @@
// Copyright (c) 2017 Arista Networks, Inc. All rights reserved.
// Arista Networks, Inc. Confidential and Proprietary.
// Subject to Arista Networks, Inc.'s EULA.
// FOR INTERNAL USE ONLY. NOT FOR DISTRIBUTION.
// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package sizeof

Some files were not shown because too many files have changed in this diff.